column           dtype           values
complexity       int64           1 to 56
n_identifiers    int64           1 to 114
code             stringlengths   19 to 12.7k
path             stringlengths   8 to 134
n_ast_nodes      int64           12 to 2.35k
ast_errors       stringlengths   0 to 4.01k
repo             stringlengths   3 to 28
documentation    dict
n_words          int64           2 to 866
language         stringclasses   1 value
vocab_size       int64           2 to 323
commit_id        stringlengths   40 to 40
file_name        stringlengths   5 to 79
id               int64           243 to 338k
nloc             int64           1 to 228
token_counts     int64           5 to 1.4k
fun_name         stringlengths   1 to 77
url              stringlengths   31 to 60
commit_message   stringlengths   3 to 15.3k
n_whitespaces    int64           1 to 3.23k
n_ast_errors     int64           0 to 20
d_id             int64           74 to 121k
ast_levels       int64           4 to 29
2
6
def on_predict_end(self, logs=None):
    logs = self._process_logs(logs)
    for callback in self.callbacks:
        callback.on_predict_end(logs)
keras/callbacks.py
51
keras
{ "docstring": "Calls the `on_predict_end` methods of its callbacks.\n\n Args:\n logs: Dict. Currently, no data is passed via this argument\n for this method, but that may change in the future.\n ", "language": "en", "n_whitespaces": 66, "n_words": 28, "vocab_size": 26 }
11
Python
11
84afc5193d38057e2e2badf9c889ea87d80d8fbf
callbacks.py
269,909
4
31
on_predict_end
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
43
0
80,323
9
3
5
def get_func(cls, key, **kwargs):
    if "agg_func" in kwargs:
        return cls.inplace_applyier_builder(key, kwargs["agg_func"])
    elif "func_dict" in kwargs:
        return cls.inplace_applyier_builder(key, kwargs["func_dict"])
    else:
        return cls.inplace_applyier_builder(key)
modin/core/dataframe/algebra/default2pandas/groupby.py
92
modin
{ "docstring": "\n Extract aggregation function from groupby arguments.\n\n Parameters\n ----------\n key : callable or str\n Default aggregation function. If aggregation function is not specified\n via groupby arguments, then `key` function is used.\n **kwargs : dict\n GroupBy arguments that may contain aggregation function.\n\n Returns\n -------\n callable\n Aggregation function.\n\n Notes\n -----\n There are two ways of how groupby aggregation can be invoked:\n 1. Explicitly with query compiler method: `qc.groupby_sum()`.\n 2. By passing aggregation function as an argument: `qc.groupby_agg(\"sum\")`.\n Both are going to produce the same result, however in the first case actual aggregation\n function can be extracted from the method name, while for the second only from the method arguments.\n ", "language": "en", "n_whitespaces": 271, "n_words": 106, "vocab_size": 78 }
21
Python
16
1e65a4afd191cf61ba05b80545d23f9b88962f41
groupby.py
153,097
7
54
get_func
https://github.com/modin-project/modin.git
FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373) Signed-off-by: Dmitry Chigarev <[email protected]>
82
0
35,257
12
2
30
def mock_json_schema(request, monkeypatch, tmp_path):
    # Do not patch integration tests
    if "integration" in request.keywords:
        return

    # Mock the subclasses list to make it very small, containing only mock nodes
    monkeypatch.setattr(
        haystack.nodes._json_schema,
        "find_subclasses_in_modules",
        lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],
    )
    # Point the JSON schema path to tmp_path
    monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path)

    # Generate mock schema in tmp_path
    filename = f"haystack-pipeline-unstable.schema.json"
    test_schema = _json_schema.get_json_schema(filename=filename, compatible_versions=["unstable"])

    with open(tmp_path / filename, "w") as schema_file:
        json.dump(test_schema, schema_file, indent=4)


#
# Integration
#

@pytest.mark.integration
@pytest.mark.elasticsearch
test/test_pipeline_yaml.py
209
@pytest.mark.integration @pytest.mark.elasticsearch
haystack
{ "docstring": "\n JSON schema with the unstable version and only mocked nodes.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
82
Python
68
11cf94a9652a577732941f27ad59eb7c8bc5063e
test_pipeline_yaml.py
256,915
13
116
mock_json_schema
https://github.com/deepset-ai/haystack.git
Pipeline's YAML: syntax validation (#2226) * Add BasePipeline.validate_config, BasePipeline.validate_yaml, and some new custom exception classes * Make error composition work properly * Clarify typing * Help mypy a bit more * Update Documentation & Code Style * Enable autogenerated docs for Milvus1 and 2 separately * Revert "Enable autogenerated docs for Milvus1 and 2 separately" This reverts commit 282be4a78a6e95862a9b4c924fc3dea5ca71e28d. * Update Documentation & Code Style * Re-enable 'additionalProperties: False' * Add pipeline.type to JSON Schema, was somehow forgotten * Disable additionalProperties on the pipeline properties too * Fix json-schemas for 1.1.0 and 1.2.0 (should not do it again in the future) * Cal super in PipelineValidationError * Improve _read_pipeline_config_from_yaml's error handling * Fix generate_json_schema.py to include document stores * Fix json schemas (retro-fix 1.1.0 again) * Improve custom errors printing, add link to docs * Add function in BaseComponent to list its subclasses in a module * Make some document stores base classes abstract * Add marker 'integration' in pytest flags * Slighly improve validation of pipelines at load * Adding tests for YAML loading and validation * Make custom_query Optional for validation issues * Fix bug in _read_pipeline_config_from_yaml * Improve error handling in BasePipeline and Pipeline and add DAG check * Move json schema generation into haystack/nodes/_json_schema.py (useful for tests) * Simplify errors slightly * Add some YAML validation tests * Remove load_from_config from BasePipeline, it was never used anyway * Improve tests * Include json-schemas in package * Fix conftest imports * Make BasePipeline abstract * Improve mocking by making the test independent from the YAML version * Add exportable_to_yaml decorator to forget about set_config on mock nodes * Fix mypy errors * Comment out one monkeypatch * Fix typing again * Improve error message for validation * Add required properties to pipelines * Fix YAML version for REST API YAMLs to 1.2.0 * Fix load_from_yaml call in load_from_deepset_cloud * fix HaystackError.__getattr__ * Add super().__init__()in most nodes and docstore, comment set_config * Remove type from REST API pipelines * Remove useless init from doc2answers * Call super in Seq3SeqGenerator * Typo in deepsetcloud.py * Fix rest api indexing error mismatch and mock version of JSON schema in all tests * Working on pipeline tests * Improve errors printing slightly * Add back test_pipeline.yaml * _json_schema.py supports different versions with identical schemas * Add type to 0.7 schema for backwards compatibility * Fix small bug in _json_schema.py * Try alternative to generate json schemas on the CI * Update Documentation & Code Style * Make linux CI match autoformat CI * Fix super-init-not-called * Accidentally committed file * Update Documentation & Code Style * fix test_summarizer_translation.py's import * Mock YAML in a few suites, split and simplify test_pipeline_debug_and_validation.py::test_invalid_run_args * Fix json schema for ray tests too * Update Documentation & Code Style * Reintroduce validation * Usa unstable version in tests and rest api * Make unstable support the latest versions * Update Documentation & Code Style * Remove needless fixture * Make type in pipeline optional in the strings validation * Fix schemas * Fix string validation for pipeline type * Improve validate_config_strings * Remove type from test p[ipelines * Update Documentation & Code Style * Fix test_pipeline * Removing more 
type from pipelines * Temporary CI patc * Fix issue with exportable_to_yaml never invoking the wrapped init * rm stray file * pipeline tests are green again * Linux CI now needs .[all] to generate the schema * Bugfixes, pipeline tests seems to be green * Typo in version after merge * Implement missing methods in Weaviate * Trying to avoid FAISS tests from running in the Milvus1 test suite * Fix some stray test paths and faiss index dumping * Fix pytest markers list * Temporarily disable cache to be able to see tests failures * Fix pyproject.toml syntax * Use only tmp_path * Fix preprocessor signature after merge * Fix faiss bug * Fix Ray test * Fix documentation issue by removing quotes from faiss type * Update Documentation & Code Style * use document properly in preprocessor tests * Update Documentation & Code Style * make preprocessor capable of handling documents * import document * Revert support for documents in preprocessor, do later * Fix bug in _json_schema.py that was breaking validation * re-enable cache * Update Documentation & Code Style * Simplify calling _json_schema.py from the CI * Remove redundant ABC inheritance * Ensure exportable_to_yaml works only on implementations * Rename subclass to class_ in Meta * Make run() and get_config() abstract in BasePipeline * Revert unintended change in preprocessor * Move outgoing_edges_input_node check inside try block * Rename VALID_CODE_GEN_INPUT_REGEX into VALID_INPUT_REGEX * Add check for a RecursionError on validate_config_strings * Address usages of _pipeline_config in data silo and elasticsearch * Rename _pipeline_config into _init_parameters * Fix pytest marker and remove unused imports * Remove most redundant ABCs * Rename _init_parameters into _component_configuration * Remove set_config and type from _component_configuration's dict * Remove last instances of set_config and replace with super().__init__() * Implement __init_subclass__ approach * Simplify checks on the existence of _component_configuration * Fix faiss issue * Dynamic generation of node schemas & weed out old schemas * Add debatable test * Add docstring to debatable test * Positive diff between schemas implemented * Improve diff printing * Rename REST API YAML files to trigger IDE validation * Fix typing issues * Fix more typing * Typo in YAML filename * Remove needless type:ignore * Add tests * Fix tests & validation feedback for accessory classes in custom nodes * Refactor RAGeneratorType out * Fix broken import in conftest * Improve source error handling * Remove unused import in test_eval.py breaking tests * Fix changed error message in tests matches too * Normalize generate_openapi_specs.py and generate_json_schema.py in the actions * Fix path to generate_openapi_specs.py in autoformat.yml * Update Documentation & Code Style * Add test for FAISSDocumentStore-like situations (superclass with init params) * Update Documentation & Code Style * Fix indentation * Remove commented set_config * Store model_name_or_path in FARMReader to use in DistillationDataSilo * Rename _component_configuration into _component_config * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
148
1
74,961
11
2
6
def _get_name_info(name_index, name_list):
    argval = name_index
    if name_list is not None:
        argval = name_list[name_index]
        argrepr = argval
    else:
        argrepr = repr(argval)
    return argval, argrepr
python3.10.4/Lib/dis.py
63
XX-Net
{ "docstring": "Helper to get optional details about named references\n\n Returns the dereferenced name as both value and repr if the name\n list is defined.\n Otherwise returns the name index and its repr().\n ", "language": "en", "n_whitespaces": 52, "n_words": 31, "vocab_size": 26 }
24
Python
17
8198943edd73a363c266633e1aa5b2a9e9c9f526
dis.py
222,538
8
38
_get_name_info
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
60
0
56,627
11
9
10
def cast_losses_to_common_dtype(losses):
    highest_float = None
    for loss in losses:
        if loss.dtype.is_floating:
            if highest_float is None or loss.dtype.size > highest_float.size:
                highest_float = loss.dtype
            elif {loss.dtype, highest_float} == {"bfloat16", "float16"}:
                highest_float = "float32"
        if loss.dtype.is_complex:
            return (
                losses  # If we find any complex losses, do not cast any losses
            )
    if highest_float:
        losses = [tf.cast(loss, highest_float) for loss in losses]
    return losses
keras/utils/losses_utils.py
148
keras
{ "docstring": "Cast a list of losses to a common dtype.\n\n If any loss is floating-point, they will all be casted to the most-precise\n floating-point loss. Otherwise the losses are not casted. We also skip casting\n losses if there are any complex losses.\n\n Args:\n losses: A list of losses.\n\n Returns:\n `losses`, but they have been casted to a common dtype.\n ", "language": "en", "n_whitespaces": 86, "n_words": 58, "vocab_size": 42 }
61
Python
43
84afc5193d38057e2e2badf9c889ea87d80d8fbf
losses_utils.py
276,973
15
91
cast_losses_to_common_dtype
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
187
0
81,807
14
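A minimal sketch of the promotion rule described in the docstring above, assuming TensorFlow is installed; the tensors and dtypes are made-up inputs, not values from this dataset:

import tensorflow as tf

# Mixing bfloat16 and float16 has no common half-precision dtype, so the
# helper promotes every loss to float32 before they are combined.
losses = [tf.constant(1.0, dtype=tf.float16), tf.constant(2.0, dtype=tf.bfloat16)]
common = [tf.cast(loss, tf.float32) for loss in losses]
total = tf.add_n(common)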
5
17
def get_available_models() -> List[str]:
    modelpath = os.path.join(os.path.dirname(__file__), "train", "model")
    models = sorted(item.name.replace(".py", "").replace("_", "-")
                    for item in os.scandir(modelpath)
                    if not item.name.startswith("_")
                    and not item.name.endswith("defaults.py")
                    and item.name.endswith(".py"))
    return models
plugins/plugin_loader.py
163
faceswap
{ "docstring": " Return a list of available training models\n\n Returns\n -------\n list:\n A list of the available training model plugin names\n ", "language": "en", "n_whitespaces": 59, "n_words": 19, "vocab_size": 15 }
28
Python
24
13cfb3f39e72e9ca181f173b7b3db2a048db0d08
plugin_loader.py
101,476
15
93
get_available_models
https://github.com/deepfakes/faceswap.git
extract: Add batch processing mode
148
0
20,889
15
1
13
def cosine_similarity(cooccurrence):
    diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal())

    with np.errstate(invalid="ignore", divide="ignore"):
        result = cooccurrence / np.sqrt(diag_rows * diag_cols)

    return np.array(result)
recommenders/utils/python_utils.py
90
recommenders
{ "docstring": "Helper method to calculate the Cosine similarity of a matrix of\n co-occurrences.\n\n Cosine similarity can be interpreted as the angle between the i-th\n and j-th item.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of cosine similarity between any two items.\n\n ", "language": "en", "n_whitespaces": 79, "n_words": 47, "vocab_size": 33 }
18
Python
17
1d7341e93d1f03387699fb3c6ae0b6c0e464296f
python_utils.py
39,442
5
51
cosine_similarity
https://github.com/microsoft/recommenders.git
Add new item similarity metrics for SAR (#1754) * Add mutual information similarity in SAR * Add lexicographers mutual information similarity for SAR * Add cosine similarity for SAR * Add inclusion index for SAR * Typos * Change SARSingleNode to SAR * Convert item similarity matrix to np.array * Update * Update SAR tests * Remove unused imports * Add explanations for new similarity metrics
37
0
7,234
12
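A self-contained numpy sketch of the same computation on a hand-made co-occurrence matrix; np.outer stands in for the repository's private _get_row_and_column_matrix helper:

import numpy as np

cooccurrence = np.array([[4.0, 2.0, 1.0],
                         [2.0, 3.0, 0.0],
                         [1.0, 0.0, 2.0]])
diag = cooccurrence.diagonal()
# Each entry is divided by sqrt(c_ii * c_jj), giving the cosine of the
# angle between items i and j.
with np.errstate(invalid="ignore", divide="ignore"):
    cosine = cooccurrence / np.sqrt(np.outer(diag, diag))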
1
2
def ray_start_client_server_for_address(address):
python/ray/util/client/ray_client_helpers.py
13
ray
{ "docstring": "\n Starts a Ray client server that initializes drivers at the specified address.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
2
Python
2
297341e107daee1ea3aff991ae8ea8c90993c683
ray_client_helpers.py
133,959
4
20
ray_start_client_server_for_address
https://github.com/ray-project/ray.git
[Test][Client] Only start ray once in client tests (#28835) It looks like we're frequently starting and shutting down Ray in this test because `ray_start_client_server` isn't connecting to the Ray created by `ray_start_regular_shared`, and is instead starting a new Ray head process every time it launches. Ray client tests are failing frequently with: ``` [2022-10-06 07:31:46,253 E 13235 13751] core_worker_process.cc:277: The core worker has already been shutdown. This happens when the language frontend accesses the Ray's worker after it is shutdown. The process will exit ``` Which is probably caused by having multiple ray clusters running simultaneous, with some shutting down asynchronously. This refactor forces all of the tests in the module to use the same Ray cluster. Also fixes two other sources of potential flakiness: * Joins the thread in test_client_thread_safe (seems like this has a bad interaction when the client server is cleaned up) * Calls ray.get in `test_stdout_log_stream`, to make sure that the remote function is done running before we try searching for its output Should also have the happy side effect of speeding up test_client. Ran the `Small & Client` tests (regular and external redis) twice each, no flakes, and windows version of test_client.
5
0
30,162
6
1
5
def voidcmd(self, cmd):
    self.putcmd(cmd)
    return self.voidresp()
python3.10.4/Lib/ftplib.py
35
XX-Net
{ "docstring": "Send a command and expect a response beginning with '2'.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
ftplib.py
217,436
3
20
voidcmd
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
27
0
54,784
7
5
14
def all_shortest_paths(G, source, target, weight=None, method="dijkstra"):
    method = "unweighted" if weight is None else method
    if method == "unweighted":
        pred = nx.predecessor(G, source)
    elif method == "dijkstra":
        pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight)
    elif method == "bellman-ford":
        pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight)
    else:
        raise ValueError(f"method not supported: {method}")

    return _build_paths_from_predecessors({source}, target, pred)
networkx/algorithms/shortest_paths/generic.py
168
networkx
{ "docstring": "Compute all shortest simple paths in the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node\n Starting node for path.\n\n target : node\n Ending node for path.\n\n weight : None, string or function, optional (default = None)\n If None, every edge has weight/distance/cost 1.\n If a string, use this edge attribute as the edge weight.\n Any edge attribute not present defaults to 1.\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly\n three positional arguments: the two endpoints of an edge and\n the dictionary of edge attributes for that edge.\n The function must return a number.\n\n method : string, optional (default = 'dijkstra')\n The algorithm to use to compute the path lengths.\n Supported options: 'dijkstra', 'bellman-ford'.\n Other inputs produce a ValueError.\n If `weight` is None, unweighted graph methods are used, and this\n suggestion is ignored.\n\n Returns\n -------\n paths : generator of lists\n A generator of all paths between source and target.\n\n Raises\n ------\n ValueError\n If `method` is not among the supported options.\n\n NetworkXNoPath\n If `target` cannot be reached from `source`.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> nx.add_path(G, [0, 1, 2])\n >>> nx.add_path(G, [0, 10, 2])\n >>> print([p for p in nx.all_shortest_paths(G, source=0, target=2)])\n [[0, 1, 2], [0, 10, 2]]\n\n Notes\n -----\n There may be many shortest paths between the source and target. If G\n contains zero-weight cycles, this function will not produce all shortest\n paths because doing so would produce infinitely many paths of unbounded\n length -- instead, we only produce the shortest simple paths.\n\n See Also\n --------\n shortest_path\n single_source_shortest_path\n all_pairs_shortest_path\n ", "language": "en", "n_whitespaces": 485, "n_words": 266, "vocab_size": 157 }
53
Python
36
b5d41847b8db0c82372faf69cd3a339d11da7ef0
generic.py
176,292
11
103
all_shortest_paths
https://github.com/networkx/networkx.git
DOC: Update documentation to include callables for weight argument (#5307) Update docs to include functions as valid input for weight argument.
102
0
41,809
12
1
8
def test_assumptions_about_jsonpatch(self):
    patch_1 = JsonPatch([{"op": "add", "path": "/hi", "value": "there"}])
    patch_2 = JsonPatch([{"op": "add", "path": "/hi", "value": "there"}])
    patch_3 = JsonPatch([{"op": "add", "path": "/different", "value": "there"}])

    assert patch_1 is not patch_2
    assert patch_1 == patch_2
    assert patch_1 != patch_3

    assert list(patch_1) == list(patch_2)
    assert list(patch_1) != list(patch_3)

    assert patch_1.apply({}) == patch_2.apply({})
    assert patch_1.apply({}) != patch_3.apply({})
tests/flow_runners/test_kubernetes.py
239
prefect
{ "docstring": "Assert our assumptions about the behavior of the jsonpatch library, so we\n can be alert to any upstream changes", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 18 }
55
Python
24
ab322ef9b1bb65887984854dc39b316f98da3b97
test_kubernetes.py
56,187
11
131
test_assumptions_about_jsonpatch
https://github.com/PrefectHQ/prefect.git
Allow Kubernetes users to customize or replace the Job manifest for flow runs Adding support for either replacing the base `job=` for a KubernetesFlowRunner, applying a list of RFC 6902 JSON patches provided by `customizations=`, or both. This implements the core changes, while preserving backwards compatiblity with the current API. Users can still provide `image=`, `namepace=` and other top-level parameters, which are now considered "shortcuts" for generating JSON patches. This is most of the work for PrefectHQ/orion#1900, but does not include the planned CLI updates to allow users to preview their jobs. Those will come in a separate change. Also updating the Kubernetes integration tests to be more reliable, and adding docs about how to get set up for running them.
132
0
11,459
12
12
23
def can_fast_delete(self, objs, from_field=None):
    if from_field and from_field.remote_field.on_delete is not CASCADE:
        return False
    if hasattr(objs, "_meta"):
        model = objs._meta.model
    elif hasattr(objs, "model") and hasattr(objs, "_raw_delete"):
        model = objs.model
    else:
        return False
    if self._has_signal_listeners(model):
        return False
    # The use of from_field comes from the need to avoid cascade back to
    # parent when parent delete is cascading to child.
    opts = model._meta
    return (
        all(
            link == from_field
            for link in opts.concrete_model._meta.parents.values()
        )
        and
        # Foreign keys pointing to this model.
        all(
            related.field.remote_field.on_delete is DO_NOTHING
            for related in get_candidate_relations_to_delete(opts)
        )
        and (
            # Something like generic foreign key.
            not any(
                hasattr(field, "bulk_related_objects")
                for field in opts.private_fields
            )
        )
    )
django/db/models/deletion.py
230
django
{ "docstring": "\n Determine if the objects in the given queryset-like or single object\n can be fast-deleted. This can be done if there are no cascades, no\n parents and no signal listeners for the object class.\n\n The 'from_field' tells where we are coming from - we need this to\n determine if the objects are in fact to be deleted. Allow also\n skipping parent -> child -> parent chain preventing fast delete of\n the child.\n ", "language": "en", "n_whitespaces": 128, "n_words": 71, "vocab_size": 51 }
108
Python
70
9c19aff7c7561e3a82978a272ecdaad40dda5c00
deletion.py
205,460
29
142
can_fast_delete
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
471
0
51,132
16
4
12
def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
    if working_memory is None:
        working_memory = get_config()["working_memory"]

    chunk_n_rows = int(working_memory * (2**20) // row_bytes)
    if max_n_rows is not None:
        chunk_n_rows = min(chunk_n_rows, max_n_rows)
    if chunk_n_rows < 1:
        warnings.warn(
            "Could not adhere to working_memory config. "
            "Currently %.0fMiB, %.0fMiB required."
            % (working_memory, np.ceil(row_bytes * 2**-20))
        )
        chunk_n_rows = 1
    return chunk_n_rows
sklearn/utils/__init__.py
148
scikit-learn
{ "docstring": "Calculates how many rows can be processed within working_memory.\n\n Parameters\n ----------\n row_bytes : int\n The expected number of bytes of memory that will be consumed\n during the processing of each row.\n max_n_rows : int, default=None\n The maximum return value.\n working_memory : int or float, default=None\n The number of rows to fit inside this number of MiB will be returned.\n When None (default), the value of\n ``sklearn.get_config()['working_memory']`` is used.\n\n Returns\n -------\n int or the value of n_samples\n\n Warns\n -----\n Issues a UserWarning if ``row_bytes`` exceeds ``working_memory`` MiB.\n ", "language": "en", "n_whitespaces": 164, "n_words": 86, "vocab_size": 63 }
55
Python
40
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
__init__.py
258,953
14
88
get_chunk_n_rows
https://github.com/scikit-learn/scikit-learn.git
MNT Update black to stable version (#22474)
141
0
75,492
16
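A worked example of the arithmetic behind this helper, with hypothetical numbers (1024 MiB of working memory, 8 kB rows):

row_bytes = 8_000
working_memory = 1_024  # MiB, assumed value for illustration
chunk_n_rows = int(working_memory * (2**20) // row_bytes)  # 134_217 rows per chunk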
6
25
def _prepare_socket_file(self, socket_path, default_prefix):
    result = socket_path
    is_mac = sys.platform.startswith("darwin")
    if sys.platform == "win32":
        if socket_path is None:
            result = f"tcp://{self._localhost}" f":{self._get_unused_port()}"
    else:
        if socket_path is None:
            result = self._make_inc_temp(
                prefix=default_prefix, directory_name=self._sockets_dir
            )
        else:
            try_to_create_directory(os.path.dirname(socket_path))

        # Check socket path length to make sure it's short enough
        maxlen = (104 if is_mac else 108) - 1  # sockaddr_un->sun_path
        if len(result.split("://", 1)[-1].encode("utf-8")) > maxlen:
            raise OSError(
                "AF_UNIX path length cannot exceed "
                "{} bytes: {!r}".format(maxlen, result)
            )
    return result
python/ray/node.py
234
ray
{ "docstring": "Prepare the socket file for raylet and plasma.\n\n This method helps to prepare a socket file.\n 1. Make the directory if the directory does not exist.\n 2. If the socket file exists, do nothing (this just means we aren't the\n first worker on the node).\n\n Args:\n socket_path (string): the socket file to prepare.\n ", "language": "en", "n_whitespaces": 109, "n_words": 53, "vocab_size": 40 }
77
Python
56
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
node.py
130,803
20
127
_prepare_socket_file
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
333
0
29,375
17
1
6
def make_union(*transformers, n_jobs=None, verbose=False): return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose)
sklearn/pipeline.py
48
scikit-learn
{ "docstring": "Construct a FeatureUnion from the given transformers.\n\n This is a shorthand for the FeatureUnion constructor; it does not require,\n and does not permit, naming the transformers. Instead, they will be given\n names automatically based on their types. It also does not allow weighting.\n\n Parameters\n ----------\n *transformers : list of estimators\n One or more estimators.\n\n n_jobs : int, default=None\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n .. versionchanged:: v0.20\n `n_jobs` default changed from 1 to None.\n\n verbose : bool, default=False\n If True, the time elapsed while fitting each transformer will be\n printed as it is completed.\n\n Returns\n -------\n f : FeatureUnion\n A :class:`FeatureUnion` object for concatenating the results of multiple\n transformer objects.\n\n See Also\n --------\n FeatureUnion : Class for concatenating the results of multiple transformer\n objects.\n\n Examples\n --------\n >>> from sklearn.decomposition import PCA, TruncatedSVD\n >>> from sklearn.pipeline import make_union\n >>> make_union(PCA(), TruncatedSVD())\n FeatureUnion(transformer_list=[('pca', PCA()),\n ('truncatedsvd', TruncatedSVD())])\n ", "language": "en", "n_whitespaces": 349, "n_words": 164, "vocab_size": 115 }
8
Python
8
ecef8cb7f44ab6a8438b43eb33f519269511cbbf
pipeline.py
260,484
2
31
make_union
https://github.com/scikit-learn/scikit-learn.git
DOC numpydoc validation for `make_union` (#23909) Co-authored-by: Adrin Jalali <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
14
0
76,280
9
3
10
def sections(self) -> List[str]: return sorted(set(plugin.split(".")[0] for plugin in self._config.config.sections() if plugin.split(".")[0] != "writer"))
tools/preview/preview.py
87
faceswap
{ "docstring": " list: The sorted section names that exist within the convert Configuration options. ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 12 }
14
Python
14
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
preview.py
101,443
4
51
sections
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
53
0
20,856
15
3
8
def get_conn(self) -> paramiko.SFTPClient:  # type: ignore[override]
    if self.conn is None:
        # TODO: remove support for ssh_hook when it is removed from SFTPOperator
        if self.ssh_hook is not None:
            self.conn = self.ssh_hook.get_conn().open_sftp()
        else:
            self.conn = super().get_conn().open_sftp()
    return self.conn
airflow/providers/sftp/hooks/sftp.py
105
airflow
{ "docstring": "\n Opens an SFTP connection to the remote host\n\n :rtype: paramiko.SFTPClient\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
37
Python
28
f3aacebe502c4ea5dc2b7d29373539296fa037eb
sftp.py
43,253
12
61
get_conn
https://github.com/apache/airflow.git
Convert sftp hook to use paramiko instead of pysftp (#24512)
122
0
7,891
17
2
10
def only_targets(self, target_type):  # type: (t.Type[THostConfig]) -> t.List[THostConfig]
    if not self.targets:
        raise Exception('There must be one or more targets.')

    assert type_guard(self.targets, target_type)

    return t.cast(t.List[THostConfig], self.targets)
test/lib/ansible_test/_internal/config.py
72
ansible
{ "docstring": "\n Return a list of target host configurations.\n Requires that there are one or more targets, all the specified type.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
25
Python
25
a06fa496d3f837cca3c437ab6e9858525633d147
config.py
266,768
5
44
only_targets
https://github.com/ansible/ansible.git
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
65
0
78,571
10
4
32
def update_replay_sample_priority(self) -> int:
    num_samples_trained_this_itr = 0
    for _ in range(self.learner_thread.outqueue.qsize()):
        if self.learner_thread.is_alive():
            (
                replay_actor,
                priority_dict,
                env_steps,
                agent_steps,
            ) = self.learner_thread.outqueue.get(timeout=0.001)
            if (
                self.config["replay_buffer_config"].get("prioritized_replay_alpha")
                > 0
            ):
                replay_actor.update_priorities.remote(priority_dict)
            num_samples_trained_this_itr += env_steps
            self.update_target_networks(env_steps)
            self._counters[NUM_ENV_STEPS_TRAINED] += env_steps
            self._counters[NUM_AGENT_STEPS_TRAINED] += agent_steps
            self.workers.local_worker().set_global_vars(
                {"timestep": self._counters[NUM_ENV_STEPS_TRAINED]}
            )
        else:
            raise RuntimeError("The learner thread died in while training")

    self._counters[STEPS_TRAINED_THIS_ITER_COUNTER] = num_samples_trained_this_itr
    self._timers["learner_dequeue"] = self.learner_thread.queue_timer
    self._timers["learner_grad"] = self.learner_thread.grad_timer
    self._timers["learner_overall"] = self.learner_thread.overall_timer
rllib/agents/dqn/apex.py
296
ray
{ "docstring": "Update the priorities of the sample batches with new priorities that are\n computed by the learner thread.\n\n Returns:\n The number of samples trained by the learner thread since the last\n training iteration.\n ", "language": "en", "n_whitespaces": 75, "n_words": 32, "vocab_size": 24 }
63
Python
48
b76273357bd1b74757b0aa1d64cee551369d7fa6
apex.py
139,297
35
183
update_replay_sample_priority
https://github.com/ray-project/ray.git
[RLlib] APEX-DQN replay buffer config validation fix. (#24588)
451
0
31,651
15
4
10
def make_action_immutable(obj):
    if isinstance(obj, np.ndarray):
        obj.setflags(write=False)
        return obj
    elif isinstance(obj, OrderedDict):
        return MappingProxyType(dict(obj))
    elif isinstance(obj, dict):
        return MappingProxyType(obj)
    else:
        return obj
rllib/utils/numpy.py
96
ray
{ "docstring": "Flags actions immutable to notify users when trying to change\n them.\n\n Can also be used with any tree-like structure containing either\n dictionaries, numpy arrays or already immutable objects per se.\n Note, however that `tree.map_structure()` will in general not \n include the shallow object containing all others and therefore\n immutability will hold only for all objects contained in it.\n Use `tree.traverse(fun, action, top_down=False)` to include\n also the containing object.\n\n Args:\n obj: The object to be made immutable.\n\n Returns:\n The immutable object.\n\n Examples:\n >>> import tree\n >>> import numpy as np\n >>> arr = np.arange(1,10)\n >>> d = dict(a = 1, b = (arr, arr))\n >>> tree.traverse(make_action_immutable, d, top_down=False)\n ", "language": "en", "n_whitespaces": 192, "n_words": 106, "vocab_size": 79 }
21
Python
14
ff575eeafc610b5a71fac37682e388476b2fb8ea
numpy.py
138,869
10
59
make_action_immutable
https://github.com/ray-project/ray.git
[RLlib] Make actions sent by RLlib to the env immutable. (#24262)
87
0
31,539
12
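A short sketch of the two freezing mechanisms used above, numpy write flags and MappingProxyType, applied to made-up values:

import numpy as np
from types import MappingProxyType

arr = np.arange(3)
arr.setflags(write=False)                 # in-place: writes now raise ValueError
action = MappingProxyType({"move": arr})  # read-only view over the dict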
2
6
def test_system_config(ray_start_cluster_head):
    cluster = ray_start_cluster_head
    worker = cluster.add_node()
    cluster.wait_for_nodes()
python/ray/tests/test_multi_node_2.py
39
ray
{ "docstring": "Checks that the internal configuration setting works.\n\n We set the cluster to timeout nodes after 2 seconds of no timeouts. We\n then remove a node, wait for 1 second to check that the cluster is out\n of sync, then wait another 2 seconds (giving 1 second of leeway) to check\n that the client has timed out. We also check to see if the config is set.\n ", "language": "en", "n_whitespaces": 81, "n_words": 66, "vocab_size": 43 }
9
Python
8
fdc7077dbcd8f54991cd36f6890d219519260dc4
test_multi_node_2.py
135,576
12
83
test_system_config
https://github.com/ray-project/ray.git
[core] Introduce pull based health check to GCS. (#29442) This PR introduced the pull-based health check to GCS. This is to fix the false positive issues when GCS is overloaded and incorrectly marks the healthy node as dead. The health check service in each ray component is implemented using gRPC built-in services. This PR focus on the client-side health check. The following features are supported: - Initial delay when a new node is added. This is for the new node to be able to ramp up. - Timeout for an RPC: in case of network issues, we introduce timeout, and the request fails to return within timeout is considered a failure. - If the health check failed X times consecutively, the node will be considered as dead. - We also introduce the interval that can be configured between two health checks sent. This client doesn't send two health checks in parallel, so the next one always waits until the first one is finished. This work has reference to k8s's healthiness probe features. A feature flag is introduced to turn it on or off and it's turned on in https://github.com/ray-project/ray/pull/29536
21
0
30,660
8
1
3
def get_backend() -> ValidBackends: return _FS_BACKEND
lib/utils.py
18
faceswap
{ "docstring": " Get the backend that Faceswap is currently configured to use.\n\n Returns\n -------\n str\n The backend configuration in use by Faceswap\n ", "language": "en", "n_whitespaces": 40, "n_words": 20, "vocab_size": 18 }
6
Python
6
91fecc47b2157d684ab9c219a860df51543222a3
utils.py
100,998
9
9
get_backend
https://github.com/deepfakes/faceswap.git
lib.Utils - add DPI detector
12
0
20,441
6
1
4
def quote_name(self, name): raise NotImplementedError( "subclasses of BaseDatabaseOperations may require a quote_name() method" )
django/db/backends/base/operations.py
25
django
{ "docstring": "\n Return a quoted version of the given table, index, or column name. Do\n not quote the given name if it's already been quoted.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 21 }
14
Python
14
9c19aff7c7561e3a82978a272ecdaad40dda5c00
operations.py
204,863
4
13
quote_name
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
46
0
50,940
8
1
6
def inspect(self) -> DockerInspect: return docker_inspect(self.args, self.container_id)
test/lib/ansible_test/_internal/connections.py
32
ansible
{ "docstring": "Inspect the container and return a DockerInspect instance with the results.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
7
Python
7
3eb0485dd92c88cc92152d3656d94492db44b183
connections.py
267,949
3
19
inspect
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
21
0
79,224
8
2
14
def to_state_create(self) -> schemas.actions.StateCreate:
    from prefect.results import BaseResult

    return schemas.actions.StateCreate(
        type=self.type,
        name=self.name,
        message=self.message,
        data=self.data if isinstance(self.data, BaseResult) else None,
        state_details=self.state_details,
    )
src/prefect/client/schemas.py
99
prefect
{ "docstring": "\n Convert this state to a `StateCreate` type which can be used to set the state of\n a run in the API.\n\n This method will drop this state's `data` if it is not a result type. Only\n results should be sent to the API. Other data is only available locally.\n ", "language": "en", "n_whitespaces": 85, "n_words": 49, "vocab_size": 38 }
21
Python
21
2f2faf370f602cfd9df307ff71e785c1c9d6a538
schemas.py
59,285
16
67
to_state_create
https://github.com/PrefectHQ/prefect.git
Update engine to use new results (#7094) # Conflicts: # .github/workflows/integration-tests.yaml # src/prefect/deployments.py # src/prefect/engine.py
104
0
11,889
12
1
13
def _jvp(f, primals, tangents):
    with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
        primals_out = f(*primals)
    return primals_out, acc.jvp(
        primals_out, unconnected_gradients=tf.UnconnectedGradients.ZERO
    )
keras/integration_test/forwardprop_test.py
78
keras
{ "docstring": "Compute the jacobian of `f` at `primals` multiplied by `tangents`.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
18
Python
17
84afc5193d38057e2e2badf9c889ea87d80d8fbf
forwardprop_test.py
272,179
6
48
_jvp
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
44
0
80,971
11
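A minimal usage sketch of tf.autodiff.ForwardAccumulator on a scalar function, assuming TensorFlow is available; the primal and tangent values are made up for illustration:

import tensorflow as tf

primals = tf.constant(3.0)
tangents = tf.constant(1.0)
with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
    y = primals ** 2
acc.jvp(y)  # d(x**2)/dx * tangent = 6.0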
5
14
def _signature_bound_method(sig):
    params = tuple(sig.parameters.values())

    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
        raise ValueError('invalid method signature')

    kind = params[0].kind
    if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
        # Drop first parameter:
        # '(p1, p2[, ...])' -> '(p2[, ...])'
        params = params[1:]
    else:
        if kind is not _VAR_POSITIONAL:
            # Unless we add a new parameter type we never
            # get here
            raise ValueError('invalid argument type')
        # It's a var-positional parameter.
        # Do nothing. '(*args[, ...])' -> '(*args[, ...])'

    return sig.replace(parameters=params)
python3.10.4/Lib/inspect.py
147
XX-Net
{ "docstring": "Private helper to transform signatures for unbound\n functions to bound methods.\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 10 }
77
Python
52
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,395
11
86
_signature_bound_method
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
180
0
55,281
13
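A small illustration of the effect described above, using only the public inspect API on a throwaway function:

import inspect

def method(self, x, y=1):
    return x + y

sig = inspect.signature(method)
params = tuple(sig.parameters.values())[1:]  # drop the first (self) parameter
bound_sig = sig.replace(parameters=params)   # signature is now (x, y=1)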
5
13
def subs(self, *args, **kwargs):  # should mirror core.basic.subs
    if len(args) == 1 and not isinstance(args[0], (dict, set)) and iter(args[0]) and not is_sequence(args[0]):
        args = (list(args[0]),)
    return self.applyfunc(lambda x: x.subs(*args, **kwargs))
sympy/matrices/common.py
130
sympy
{ "docstring": "Return a new matrix with subs applied to each entry.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y\n >>> from sympy import SparseMatrix, Matrix\n >>> SparseMatrix(1, 1, [x])\n Matrix([[x]])\n >>> _.subs(x, y)\n Matrix([[y]])\n >>> Matrix(_).subs(y, x)\n Matrix([[x]])\n ", "language": "en", "n_whitespaces": 114, "n_words": 37, "vocab_size": 30 }
30
Python
27
59d22b6bb7287613d598611027f640d068ca5748
common.py
196,366
4
83
subs
https://github.com/sympy/sympy.git
Moved imports to higher level
64
0
47,866
12
4
12
def _resolve_script(self, script_entity_id) -> None:
    for entity in script.entities_in_script(self.hass, script_entity_id):
        self._add_or_resolve("entity", entity)

    for device in script.devices_in_script(self.hass, script_entity_id):
        self._add_or_resolve("device", device)

    for area in script.areas_in_script(self.hass, script_entity_id):
        self._add_or_resolve("area", area)
homeassistant/components/search/__init__.py
120
core
{ "docstring": "Resolve a script.\n\n Will only be called if script is an entry point.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 13 }
26
Python
20
c0b04e9f91a34dfee0ceb12770148317fe3e2cbf
__init__.py
307,722
11
76
_resolve_script
https://github.com/home-assistant/core.git
Sort some code in the search integration (#78519)
87
0
106,489
10
1
7
def test_escape_sequence_resulting_in_multiple_keypresses(parser):
    events = list(parser.feed("\x1b[2;4~"))
    assert len(events) == 2
    assert events[0].key == "escape"
    assert events[1].key == "shift+insert"
tests/test_xterm_parser.py
75
textual
{ "docstring": "Some sequences are interpreted as more than 1 keypress", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
17
Python
13
bfb962bacf274373e5706090cd854b6aa0857270
test_xterm_parser.py
183,781
5
42
test_escape_sequence_resulting_in_multiple_keypresses
https://github.com/Textualize/textual.git
Backtracking unknown escape sequences, various tests for XTermParser
32
0
44,334
11
2
5
async def _api_startup_event(self):
    if not ApiServer._message_stream:
        ApiServer._message_stream = MessageStream()
freqtrade/rpc/api_server/webserver.py
36
freqtrade
{ "docstring": "\n Creates the MessageStream class on startup\n so it has access to the same event loop\n as uvicorn\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 16 }
9
Python
9
442467e8aed2ff639bfba04e7a2f6e175f774af1
webserver.py
151,672
3
19
_api_startup_event
https://github.com/freqtrade/freqtrade.git
remove old comments and code
34
0
35,096
10
1
24
def test_ohe_infrequent_three_levels_drop_frequent(drop):
    X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
    ohe = OneHotEncoder(
        handle_unknown="infrequent_if_exist", sparse=False, max_categories=3, drop=drop
    ).fit(X_train)

    X_test = np.array([["b"], ["c"], ["d"]])
    assert_allclose([[0, 0], [1, 0], [0, 1]], ohe.transform(X_test))

    # Check handle_unknown="ignore"
    ohe.set_params(handle_unknown="ignore").fit(X_train)
    msg = "Found unknown categories"
    with pytest.warns(UserWarning, match=msg):
        X_trans = ohe.transform([["b"], ["e"]])
        assert_allclose([[0, 0], [0, 0]], X_trans)


@pytest.mark.parametrize("drop", [["a"], ["d"]])
sklearn/preprocessing/tests/test_encoders.py
322
@pytest.mark.parametrize("drop", [["a"], ["d"]])
scikit-learn
{ "docstring": "Test three levels and dropping the frequent category.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
63
Python
49
7f0006c8aad1a09621ad19c3db19c3ff0555a183
test_encoders.py
259,235
12
176
test_ohe_infrequent_three_levels_drop_frequent
https://github.com/scikit-learn/scikit-learn.git
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
109
1
75,667
16
1
3
def indented(self): cli_logger = self
python/ray/autoscaler/_private/cli_logger.py
18
ray
{ "docstring": "Context manager that starts an indented block of output.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
5
Python
5
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
cli_logger.py
130,421
6
20
indented
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
19
0
29,267
6
1
3
def lsb_release_info(self):
    # type: () -> Dict[str, str]
    return self._lsb_release_info
pipenv/patched/notpip/_vendor/distro.py
20
pipenv
{ "docstring": "\n Return a dictionary containing key-value pairs for the information\n items from the lsb_release command data source of the OS\n distribution.\n\n For details, see :func:`distro.lsb_release_info`.\n ", "language": "en", "n_whitespaces": 60, "n_words": 24, "vocab_size": 22 }
10
Python
10
f3166e673fe8d40277b804d35d77dcdb760fc3b3
distro.py
20,074
2
10
lsb_release_info
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
31
0
3,219
6
1
5
def test_batch_mapper_numpy_data_format(ds_with_expected_pandas_numpy_df): ds, expected_df, expected_numpy_df = ds_with_expected_pandas_numpy_df
python/ray/data/tests/test_batch_mapper.py
23
ray
{ "docstring": "Tests batch mapper functionality for numpy data format.\n\n Note:\n For single column pandas dataframes, we automatically convert it to\n single column tensor with column name as `__value__`.\n ", "language": "en", "n_whitespaces": 47, "n_words": 27, "vocab_size": 24 }
7
Python
7
9c39a28ba2f6221ffd8327fa21cb8294f0390fee
test_batch_mapper.py
128,154
20
145
test_batch_mapper_numpy_data_format
https://github.com/ray-project/ray.git
[AIR][Numpy] Add numpy narrow waist to `Preprocessor` and `BatchMapper` (#28418) Co-authored-by: Eric Liang <[email protected]> Co-authored-by: Clark Zinzow <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
13
0
28,614
7
1
14
def test_realm_admin_remove_others_from_public_stream(self) -> None:
    result = self.attempt_unsubscribe_of_principal(
        query_count=15,
        target_users=[self.example_user("cordelia")],
        is_realm_admin=True,
        is_subbed=True,
        invite_only=False,
        target_users_subbed=True,
    )
    json = self.assert_json_success(result)
    self.assert_length(json["removed"], 1)
    self.assert_length(json["not_removed"], 0)
zerver/tests/test_subs.py
120
zulip
{ "docstring": "\n If you're a realm admin, you can remove people from public streams, even\n those you aren't on.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
21
Python
20
803982e87254e3b1ebcb16ed795e224afceea3a3
test_subs.py
83,836
16
76
test_realm_admin_remove_others_from_public_stream
https://github.com/zulip/zulip.git
message_flags: Short-circuit if no messages changed. Omit sending an event, and updating the database, if there are no matching messages.
129
0
17,731
13
5
11
def _splitext(p, sep, altsep, extsep):
    # NOTE: This code must work for text and bytes strings.

    sepIndex = p.rfind(sep)
    if altsep:
        altsepIndex = p.rfind(altsep)
        sepIndex = max(sepIndex, altsepIndex)

    dotIndex = p.rfind(extsep)
    if dotIndex > sepIndex:
        # skip all leading dots
        filenameIndex = sepIndex + 1
        while filenameIndex < dotIndex:
            if p[filenameIndex:filenameIndex+1] != extsep:
                return p[:dotIndex], p[dotIndex:]
            filenameIndex += 1

    return p, p[:0]
python3.10.4/Lib/genericpath.py
154
XX-Net
{ "docstring": "Split the extension from a pathname.\n\n Extension is everything from the last dot to the end, ignoring\n leading dots. Returns \"(root, ext)\"; ext may be empty.", "language": "en", "n_whitespaces": 32, "n_words": 26, "vocab_size": 23 }
62
Python
48
8198943edd73a363c266633e1aa5b2a9e9c9f526
genericpath.py
217,520
13
97
_splitext
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
155
0
54,832
14
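The public wrapper around this helper is os.path.splitext; two quick calls showing the leading-dot rule from the docstring:

import os.path

os.path.splitext("archive.tar.gz")  # ('archive.tar', '.gz')
os.path.splitext(".bashrc")         # ('.bashrc', '') - leading dots are not extensions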
1
23
def any_numeric_dtype(request):
    return request.param


# categoricals are handled separately
_any_skipna_inferred_dtype = [
    ("string", ["a", np.nan, "c"]),
    ("string", ["a", pd.NA, "c"]),
    ("mixed", ["a", pd.NaT, "c"]),  # pd.NaT not considered valid by is_string_array
    ("bytes", [b"a", np.nan, b"c"]),
    ("empty", [np.nan, np.nan, np.nan]),
    ("empty", []),
    ("mixed-integer", ["a", np.nan, 2]),
    ("mixed", ["a", np.nan, 2.0]),
    ("floating", [1.0, np.nan, 2.0]),
    ("integer", [1, np.nan, 2]),
    ("mixed-integer-float", [1, np.nan, 2.0]),
    ("decimal", [Decimal(1), np.nan, Decimal(2)]),
    ("boolean", [True, np.nan, False]),
    ("boolean", [True, pd.NA, False]),
    ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
    ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]),
    ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
    # The following two dtypes are commented out due to GH 23554
    # ('complex', [1 + 1j, np.nan, 2 + 2j]),
    # ('timedelta64', [np.timedelta64(1, 'D'),
    #                  np.nan, np.timedelta64(2, 'D')]),
    ("timedelta", [timedelta(1), np.nan, timedelta(2)]),
    ("time", [time(1), np.nan, time(2)]),
    ("period", [Period(2013), pd.NaT, Period(2018)]),
    ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype)  # use inferred type as fixture-id


@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
pandas/conftest.py
591
@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
pandas
{ "docstring": "\n Parameterized fixture for all numeric dtypes.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n * complex\n * 'complex64'\n * 'complex128'\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n * 'Float32'\n * 'Float64'\n ", "language": "en", "n_whitespaces": 138, "n_words": 56, "vocab_size": 32 }
149
Python
103
fe9e5d023e20304ad1bdfa1da53f3af452c72a00
conftest.py
169,100
2
10
any_numeric_dtype
https://github.com/pandas-dev/pandas.git
REGR: .describe on unsigned dtypes results in object (#48473)
244
1
40,391
10
3
11
def get_variation_axes(self):
    try:
        axes = self.font.getvaraxes()
    except AttributeError as e:
        msg = "FreeType 2.9.1 or greater is required"
        raise NotImplementedError(msg) from e
    for axis in axes:
        axis["name"] = axis["name"].replace(b"\x00", b"")
    return axes
src/PIL/ImageFont.py
100
Pillow
{ "docstring": "\n :returns: A list of the axes in a variation font.\n :exception OSError: If the font is not a variation font.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 16 }
32
Python
29
2ae55ccbdad9c842929fb238ea1eb81d1f999024
ImageFont.py
243,751
9
57
get_variation_axes
https://github.com/python-pillow/Pillow.git
Improve exception traceback readability
111
0
70,111
12
2
20
def flatten(index, name="segmented_flatten"):
    batch_size = tf.reduce_prod(index.batch_shape())
    offset = tf.range(batch_size) * index.num_segments
    offset = tf.reshape(offset, index.batch_shape())
    for _ in range(index.batch_dims, index.indices.shape.rank):
        offset = tf.expand_dims(offset, -1)

    indices = tf.cast(offset, index.indices.dtype) + index.indices
    return IndexMap(
        indices=tf.reshape(indices, [-1]),
        num_segments=index.num_segments * batch_size,
        batch_dims=0,
    )
src/transformers/models/tapas/modeling_tf_tapas.py
193
transformers
{ "docstring": "\n Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements\n distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with\n `num_segments` multiplied by the number of elements in the batch.\n\n Args:\n index: IndexMap to flatten.\n name: Name for the TensorFlow operation.\n\n Returns:\n The flattened IndexMap.\n ", "language": "en", "n_whitespaces": 99, "n_words": 65, "vocab_size": 51 }
37
Python
30
f04257fdbcb6ecb5a9bef75f4c2a8d2e8b5a6209
modeling_tf_tapas.py
38,044
8
124
flatten
https://github.com/huggingface/transformers.git
Add test to ensure models can take int64 inputs (#17210) * Add test to ensure models can take int64 inputs * is_integer is an attribute, not a method * Fix test when some inputs aren't tensors * Add casts to blenderbot and blenderbot-small * Add casts to the other failing models
65
0
6,902
12
1
37
def test_job_job_events_children_summary_is_tree(get, organization_factory, job_template_factory):
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template
    job = jt.create_unified_job()
    url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
    response = get(url, user=objs.superusers.admin, expect=200)
    assert response.data["event_processing_finished"] == False
    JobEvent.create_from_data(
        job_id=job.pk, uuid='uuid1', parent_uuid='', event="playbook_on_start", counter=1, stdout='a' * 1024, job_created=job.created
    ).save()
    JobEvent.create_from_data(
        job_id=job.pk, uuid='uuid2', parent_uuid='uuid1', event="playbook_on_play_start", counter=2, stdout='a' * 1024, job_created=job.created
    ).save()
    JobEvent.create_from_data(
        job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event="playbook_on_task_start", counter=3, stdout='a' * 1024, job_created=job.created
    ).save()
    JobEvent.create_from_data(job_id=job.pk, uuid='uuid4', parent_uuid='', event='verbose', counter=4, stdout='a' * 1024, job_created=job.created).save()
    JobEvent.create_from_data(
        job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event="playbook_on_play_start", counter=5, stdout='a' * 1024, job_created=job.created
    ).save()
    JobEvent.create_from_data(
        job_id=job.pk, uuid='uuid6', parent_uuid='uuid2', event="playbook_on_task_start", counter=6, stdout='a' * 1024, job_created=job.created
    ).save()
    job.emitted_events = job.get_event_queryset().count()
    job.status = "successful"
    job.save()
    url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
    response = get(url, user=objs.superusers.admin, expect=200)
    assert response.data["children_summary"] == {}
    assert response.data["meta_event_nested_uuid"] == {}
    assert response.data["event_processing_finished"] == True
    assert response.data["is_tree"] == False
awx/main/tests/functional/api/test_events.py
722
awx
{ "docstring": "\n children_summary should return {is_tree: False} if the event structure is not tree-like\n \n E1\n E2\n E3\n E4 (verbose)\n E5\n E6 <-- parent is E2, but comes after another \"branch\" E5\n ", "language": "en", "n_whitespaces": 74, "n_words": 29, "vocab_size": 27 }
128
Python
65
550d9d5e42a605a23cb540584bf439c07c4185d4
test_events.py
81,243
40
442
test_job_job_events_children_summary_is_tree
https://github.com/ansible/awx.git
detect if job events are tree-like and collapsable in the UI
248
0
17,173
12
4
22
def normalize_span_op_histogram_results(span_op, histogram_params, results):
    histogram_column = get_span_count_histogram_column(span_op, histogram_params)
    bin_name = get_function_alias(histogram_column)

    # zerofill and rename the columns while making sure to adjust for precision
    bucket_map = {}
    for row in results["data"]:
        # we expect the bin the be an integer, this is because all floating
        # point values are rounded during the calculation
        bucket = int(row[bin_name])
        bucket_map[bucket] = row["count"]

    new_data = []
    for i in range(histogram_params.num_buckets):
        bucket = histogram_params.start_offset + histogram_params.bucket_size * i
        row = {"bin": bucket, "count": bucket_map.get(bucket, 0)}
        if histogram_params.multiplier > 1:
            row["bin"] /= float(histogram_params.multiplier)
        new_data.append(row)

    return new_data
src/sentry/snuba/discover.py
203
sentry
{ "docstring": "\n Normalizes the span histogram results by renaming the columns to key and bin\n and make sure to zerofill any missing values.\n\n :param str span_op: The span op for which you want to generate the\n histograms for.\n :param HistogramParams histogram_params: The histogram parameters used.\n :param any results: The results from the histogram query that may be missing\n bins and needs to be normalized.\n ", "language": "en", "n_whitespaces": 95, "n_words": 62, "vocab_size": 43 }
90
Python
71
12bb908ad28a4c1b6564253053a6f65ba4cdded9
discover.py
93,873
15
124
normalize_span_op_histogram_results
https://github.com/getsentry/sentry.git
feat(spans): Add a span count distribution endpoint (#36957) * hack histogram endpoint to serve span counts * wip * clean up * more clean up * clean up * fixes and test * address comments
184
0
19,022
13
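A small sketch of the zerofill step described in the docstring above, with invented bucket parameters and a made-up sparse query result (not Sentry's actual API):

bucket_size, start_offset, num_buckets = 10, 0, 5
data = [{"bin": 10, "count": 3}, {"bin": 40, "count": 1}]            # sparse result: only non-empty bins
bucket_map = {int(row["bin"]): row["count"] for row in data}
histogram = [
    {"bin": start_offset + bucket_size * i,
     "count": bucket_map.get(start_offset + bucket_size * i, 0)}     # missing bins become 0
    for i in range(num_buckets)
]
# -> bins 0, 10, 20, 30, 40 with counts 0, 3, 0, 0, 1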
5
11
def simplify_ner_for_qa(output):
    compact_output = []
    for answer in output["answers"]:

        entities = []
        for entity in answer.meta["entities"]:
            if (
                entity["start"] >= answer.offsets_in_document[0].start
                and entity["end"] <= answer.offsets_in_document[0].end
            ):
                entities.append(entity["word"])

        compact_output.append({"answer": answer.answer, "entities": entities})
    return compact_output
haystack/nodes/extractor/entity.py
152
haystack
{ "docstring": "\n Returns a simplified version of the output dictionary\n with the following structure:\n [\n {\n answer: { ... }\n entities: [ { ... }, {} ]\n }\n ]\n The entities included are only the ones that overlap with\n the answer itself.\n\n :param output: Output from a query pipeline\n ", "language": "en", "n_whitespaces": 108, "n_words": 47, "vocab_size": 36 }
33
Python
28
15a59fd04071dc1e13c256680407ba1b63e7b1f2
entity.py
257,972
12
90
simplify_ner_for_qa
https://github.com/deepset-ai/haystack.git
feat: Updated EntityExtractor to handle long texts and added better postprocessing (#3154) * Remove dependence on HuggingFace TokenClassificationPipeline and group all postprocessing functions under one class * Added copyright notice for HF and deepset to entity file to acknowledge that a lot of the postprocessing parts came from the transformers library. * Fixed text squishing problem. Added additional unit test for it. Co-authored-by: ju-gu <[email protected]>
133
0
75,176
15
1
7
def _combine_individual_stats(self, operator_count, cv_score, individual_stats):
    stats = deepcopy(
        individual_stats
    )  # Deepcopy, since the string reference to predecessor should be cloned
    stats["operator_count"] = operator_count
    stats["internal_cv_score"] = cv_score
    return stats
tpot/base.py
55
tpot
{ "docstring": "Combine the stats with operator count and cv score and preprare to be written to _evaluated_individuals\n\n Parameters\n ----------\n operator_count: int\n number of components in the pipeline\n cv_score: float\n internal cross validation score\n individual_stats: dictionary\n dict containing statistics about the individual. currently:\n 'generation': generation in which the individual was evaluated\n 'mutation_count': number of mutation operations applied to the individual and its predecessor cumulatively\n 'crossover_count': number of crossover operations applied to the individual and its predecessor cumulatively\n 'predecessor': string representation of the individual\n\n Returns\n -------\n stats: dictionary\n dict containing the combined statistics:\n 'operator_count': number of operators in the pipeline\n 'internal_cv_score': internal cross validation score\n and all the statistics contained in the 'individual_stats' parameter\n ", "language": "en", "n_whitespaces": 295, "n_words": 111, "vocab_size": 66 }
29
Python
26
388616b6247ca4ea8de4e2f340d6206aee523541
base.py
181,816
7
32
_combine_individual_stats
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
83
0
43,600
8
1
13
def test_ridgecv_normalize_deprecated(Estimator):
    X = np.array([[1, -1], [1, 1]])
    y = np.array([0, 1])

    estimator = Estimator(normalize=True)

    with pytest.warns(
        FutureWarning, match=r"Set parameter alphas to: original_alphas \* n_samples"
    ):
        estimator.fit(X, y)
sklearn/linear_model/tests/test_ridge.py
108
scikit-learn
{ "docstring": "Check that the normalize deprecation warning mentions the rescaling of alphas\n\n Non-regression test for issue #22540\n ", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 15 }
28
Python
26
f14af688b7e77ecb6df9dfee93ec39b6c0334b86
test_ridge.py
259,066
8
68
test_ridgecv_normalize_deprecated
https://github.com/scikit-learn/scikit-learn.git
FIX Make Ridge*CV warn about rescaling alphas with scaling (#22585)
60
0
75,551
11
4
12
def load(cls, request_or_site=None):
    # We can only cache on the request, so if there is no request then
    # we know there's nothing in the cache.
    if request_or_site is None or isinstance(request_or_site, Site):
        return cls._get_or_create()

    # Check if we already have this in the cache and return it if so.
    attr_name = cls.get_cache_attr_name()
    if hasattr(request_or_site, attr_name):
        return getattr(request_or_site, attr_name)

    obj = cls._get_or_create()

    # Cache for next time.
    setattr(request_or_site, attr_name, obj)

    return obj
wagtail/contrib/settings/models.py
110
wagtail
{ "docstring": "\n Get or create an instance of this model. There is only ever one\n instance of models inheriting from `AbstractSetting` so we can\n use `pk=1`.\n\n If `request_or_site` is present and is a request object, then we cache\n the result on the request for faster repeat access.\n ", "language": "en", "n_whitespaces": 88, "n_words": 45, "vocab_size": 38 }
72
Python
53
d967eccef28ce47f60d26be1c28f2d83a25f40b0
models.py
78,250
9
67
load
https://github.com/wagtail/wagtail.git
Add generic settings to compliment site-specific settings (#8327)
171
0
16,749
9
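A generic sketch of the per-request caching pattern used by load, with an invented attribute name and a stand-in for cls._get_or_create():

class FakeRequest:                        # any mutable object can host the cache
    pass

def expensive_lookup():
    return {"site_name": "example"}       # pretend this hits the database

def load_cached(request, attr_name="_example_setting_cache"):
    if hasattr(request, attr_name):
        return getattr(request, attr_name)    # cached earlier in this request
    obj = expensive_lookup()
    setattr(request, attr_name, obj)          # cache for later calls
    return obj

req = FakeRequest()
assert load_cached(req) is load_cached(req)   # second call reuses the cached object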
1
6
def map_to_type(self, name, cls):
    self.registry[name.lower()] = cls
python3.10.4/Lib/email/headerregistry.py
36
XX-Net
{ "docstring": "Register cls as the specialized class for handling \"name\" headers.\n\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
7
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
headerregistry.py
223,760
2
22
map_to_type
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
21
0
57,051
9
2
5
def search_projects(self, group=None, query=None, simple=True):
src/sentry/integrations/gitlab/client.py
28
sentry
{ "docstring": "Get projects\n\n See https://docs.gitlab.com/ee/api/groups.html#list-a-group-s-projects\n and https://docs.gitlab.com/ee/api/projects.html#list-all-projects\n ", "language": "en", "n_whitespaces": 27, "n_words": 6, "vocab_size": 6 }
5
Python
5
73959a1d9b946cd0b7054bebcbc9f50929bc9dc3
client.py
95,879
8
55
search_projects
https://github.com/getsentry/sentry.git
I have rebased 15188 (round #2) (#31375) * Make GitLab Group Path optional Co-authored-by: King Chung Huang <[email protected]> Co-authored-by: Colleen O'Rourke <[email protected]>
12
0
19,254
6
1
11
def test_launcher_ensures_stdio(self):
    from kitty.constants import kitty_exe
    import subprocess
    exe = kitty_exe()
    cp = subprocess.run([exe, '+runpy', f])
    self.assertEqual(cp.returncode, 0)
kitty_tests/check_build.py
76
kitty
{ "docstring": "\\\nimport os, sys\nif sys.stdin:\n os.close(sys.stdin.fileno())\nif sys.stdout:\n os.close(sys.stdout.fileno())\nif sys.stderr:\n os.close(sys.stderr.fileno())\nos.execlp({exe!r}, 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)')\n", "language": "en", "n_whitespaces": 37, "n_words": 34, "vocab_size": 26 }
18
Python
16
e2a1f8dde783c55dbca449691986923cb4025721
check_build.py
103,729
15
43
test_launcher_ensures_stdio
https://github.com/kovidgoyal/kitty.git
...
52
0
21,712
11
1
15
def test_redirect_to_default(self):
    start_url = reverse("wagtailsettings:edit", args=["tests", "testsetting"])
    dest_url = reverse(
        "wagtailsettings:edit", args=["tests", "testsetting", self.default_site.pk]
    )
    response = self.client.get(start_url, follow=True)
    self.assertRedirects(
        response, dest_url, status_code=302, fetch_redirect_response=False
    )
wagtail/contrib/settings/tests/test_admin.py
116
wagtail
{ "docstring": "\n Should redirect to the setting for the default site.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
25
Python
21
d10f15e55806c6944827d801cd9c2d53f5da4186
test_admin.py
73,498
9
70
test_redirect_to_default
https://github.com/wagtail/wagtail.git
Reformat with black
96
0
16,029
12
1
14
def _unfold(arr, axis, size, step):
    new_shape = [*arr.shape, size]
    new_strides = [*arr.strides, arr.strides[axis]]
    new_shape[axis] = (new_shape[axis] - size) // step + 1
    new_strides[axis] = new_strides[axis] * step
    return np.lib.stride_tricks.as_strided(
        arr, shape=new_shape, strides=new_strides, writeable=False)
lib/matplotlib/cbook/__init__.py
129
matplotlib
{ "docstring": "\n Append an extra dimension containing sliding windows along *axis*.\n\n All windows are of size *size* and begin with every *step* elements.\n\n Parameters\n ----------\n arr : ndarray, shape (N_1, ..., N_k)\n The input array\n axis : int\n Axis along which the windows are extracted\n size : int\n Size of the windows\n step : int\n Stride between first elements of subsequent windows.\n\n Returns\n -------\n ndarray, shape (N_1, ..., 1 + (N_axis-size)/step, ..., N_k, size)\n\n Examples\n --------\n >>> i, j = np.ogrid[:3, :7]\n >>> a = i*10 + j\n >>> a\n array([[ 0, 1, 2, 3, 4, 5, 6],\n [10, 11, 12, 13, 14, 15, 16],\n [20, 21, 22, 23, 24, 25, 26]])\n >>> _unfold(a, axis=1, size=3, step=2)\n array([[[ 0, 1, 2],\n [ 2, 3, 4],\n [ 4, 5, 6]],\n [[10, 11, 12],\n [12, 13, 14],\n [14, 15, 16]],\n [[20, 21, 22],\n [22, 23, 24],\n [24, 25, 26]]])\n ", "language": "en", "n_whitespaces": 352, "n_words": 145, "vocab_size": 106 }
32
Python
27
13438f842729df1b04445d44ea83f616d1b85567
__init__.py
110,054
9
85
_unfold
https://github.com/matplotlib/matplotlib.git
Fix some minor docstring typos
176
0
23,899
11
2
18
def test_5_model(self):
    query = 
    predict_query = 
    for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
        self.sql_via_http(
            query.format(char, char),
            company_id=cid,
            expected_resp_type=RESPONSE_TYPE.OK
        )
        response = self.sql_via_http(
            predict_query.format(char),
            company_id=cid,
            expected_resp_type=RESPONSE_TYPE.TABLE
        )
        self.assertTrue(len(response['data']), 1)
tests/integration_tests/flows/test_company_independent.py
142
mindsdb
{ "docstring": "\n CREATE MODEL mindsdb.model_{}\n FROM test_integration_{} (\n select * from test_data.home_rentals limit 50\n ) PREDICT rental_price\n USING join_learn_process=true, time_aim=5\n \n select * from mindsdb.model_{} where sqft = 100\n ", "language": "en", "n_whitespaces": 112, "n_words": 26, "vocab_size": 22 }
29
Python
24
7c02e15aa403a4ca1fa34489dd2df9136d6c961c
test_company_independent.py
117,189
23
90
test_5_model
https://github.com/mindsdb/mindsdb.git
Projects structure (#3532) Projects structure
196
0
25,918
13
1
22
def test_predictor_tableau_header(self, mock_handler):
    df = pd.DataFrame([
        {'a': 1, 'b': 'one'},
        {'a': 2, 'b': 'two'},
        {'a': 1, 'b': 'three'},
    ])
    self.set_handler(mock_handler, name='pg', tables={'tasks': df})

    # --- use predictor ---
    predicted_value = 5
    predictor = {
        'name': 'task_model',
        'predict': 'p',
        'dtypes': {
            'p': dtype.float,
            'a': dtype.integer,
            'b': dtype.categorical
        },
        'predicted_value': predicted_value
    }
    self.set_predictor(predictor)
    ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))

    # second column is having last value of 'b'
    # 3: count rows, 4: sum of 'a', 5 max of prediction
    assert ret.data[0] == [3, 4, 5]
tests/unit/test_executor.py
250
mindsdb
{ "docstring": "\n SELECT \n SUM(1) AS `cnt__0B4A4E8BD11C48FFB4730D4D2C32191A_ok`,\n sum(`Custom SQL Query`.`a`) AS `sum_height_ok`,\n max(`Custom SQL Query`.`p`) AS `sum_length1_ok`\n FROM (\n SELECT res.a, res.p \n FROM pg.tasks as source\n JOIN mindsdb.task_model as res\n ) `Custom SQL Query`\n HAVING (COUNT(1) > 0)\n ", "language": "en", "n_whitespaces": 176, "n_words": 35, "vocab_size": 28 }
82
Python
64
02a831997cdffafca7cb160eb1938e72020ee049
test_executor.py
116,154
32
143
test_predictor_tableau_header
https://github.com/mindsdb/mindsdb.git
executor tests
298
0
25,675
12
1
9
def test_single_file_metadata(pyi_builder):
    # Add directory containing the my-test-package metadata to search path
    extra_path = os.path.join(_MODULES_DIR, "pyi_single_file_metadata")

    pyi_builder.test_source(
        ,
        pyi_args=['--paths', extra_path]
    )
tests/functional/test_misc.py
54
pyinstaller
{ "docstring": "\n import pkg_resources\n\n # The pkg_resources.get_distribution() call automatically triggers collection of the metadata. While it does not\n # raise an error if metadata is not found while freezing, the calls below will fall at run-time in that case.\n dist = pkg_resources.get_distribution('my-test-package')\n\n # Sanity check\n assert dist.project_name == 'my-test-package'\n assert dist.version == '1.0'\n assert dist.egg_name() == f'my_test_package-{dist.version}-py{sys.version_info[0]}.{sys.version_info[1]}'\n ", "language": "en", "n_whitespaces": 119, "n_words": 55, "vocab_size": 47 }
21
Python
21
460a53842a220faa70f892ab0127b6d4dd21c4eb
test_misc.py
262,791
17
31
test_single_file_metadata
https://github.com/pyinstaller/pyinstaller.git
tests: add a test for single-file metadata collection
46
0
77,372
10
2
8
def should_toggle_mask(self) -> bool:
    with self._lock:
        retval = self._triggers["toggle_mask"]
        if retval:
            logger.debug("Sending toggle mask")
            self._triggers["toggle_mask"] = False
    return retval
scripts/train.py
74
faceswap
{ "docstring": " Check whether the mask should be toggled and return the value. If ``True`` is returned\n then resets mask toggle back to ``False``\n\n Returns\n -------\n bool\n ``True`` if the mask should be toggled otherwise ``False``. ", "language": "en", "n_whitespaces": 74, "n_words": 34, "vocab_size": 26 }
19
Python
16
3c73ae4ec9f0f30649a5e20465a268bbcfd690eb
train.py
101,049
14
40
should_toggle_mask
https://github.com/deepfakes/faceswap.git
bugfix: Update preview screen in GUI
92
0
20,487
12
1
3
def shape(self):
    return self.table.shape
src/datasets/table.py
22
datasets
{ "docstring": "\n Dimensions of the table: (#rows, #columns).\n\n Returns:\n :obj:`(int, int)`: Number of rows and number of columns.\n ", "language": "en", "n_whitespaces": 49, "n_words": 16, "vocab_size": 14 }
4
Python
4
e35be138148333078284b942ccc9ed7b1d826f97
table.py
104,426
2
12
shape
https://github.com/huggingface/datasets.git
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
18
0
21,862
7
1
7
def add_preheated_app_session(self) -> None:
    session = self._create_or_reuse_app_session(ws=None)
    session.handle_rerun_script_request(is_preheat=True)
lib/streamlit/server/server.py
45
streamlit
{ "docstring": "Register a fake browser with the server and run the script.\n\n This is used to start running the user's script even before the first\n browser connects.\n ", "language": "en", "n_whitespaces": 47, "n_words": 26, "vocab_size": 22 }
8
Python
8
704eab3478cf69847825b23dabf15813a8ac9fa2
server.py
118,567
8
26
add_preheated_app_session
https://github.com/streamlit/streamlit.git
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
29
0
26,297
9
3
21
def sample_y(self, X, n_samples=1, random_state=0):
    rng = check_random_state(random_state)

    y_mean, y_cov = self.predict(X, return_cov=True)
    if y_mean.ndim == 1:
        y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
    else:
        y_samples = [
            rng.multivariate_normal(
                y_mean[:, target], y_cov[..., target], n_samples
            ).T[:, np.newaxis]
            for target in range(y_mean.shape[1])
        ]
        y_samples = np.hstack(y_samples)
    return y_samples
sklearn/gaussian_process/_gpr.py
171
scikit-learn
{ "docstring": "Draw samples from Gaussian process and evaluate at X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features) or list of object\n Query points where the GP is evaluated.\n\n n_samples : int, default=1\n Number of samples drawn from the Gaussian process per query point.\n\n random_state : int, RandomState instance or None, default=0\n Determines random number generation to randomly draw samples.\n Pass an int for reproducible results across multiple function\n calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n y_samples : ndarray of shape (n_samples_X, n_samples), or \\\n (n_samples_X, n_targets, n_samples)\n Values of n_samples samples drawn from Gaussian process and\n evaluated at query points.\n ", "language": "en", "n_whitespaces": 262, "n_words": 100, "vocab_size": 73 }
44
Python
36
3786daf7dc5c301478d489b0756f90d0ac5d010f
_gpr.py
258,794
14
114
sample_y
https://github.com/scikit-learn/scikit-learn.git
BUG Fix covariance and stdev shape in GPR with normalize_y (#22199) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Nakamura-Zimmerer, Tenavi (ARC-AF) <[email protected]>
194
0
75,430
16
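A short usage sketch for sample_y on a fitted scikit-learn GaussianProcessRegressor; the toy data is invented, and the output shape follows the docstring above:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

X = np.linspace(0, 1, 5).reshape(-1, 1)
y = np.sin(X).ravel()
gpr = GaussianProcessRegressor().fit(X, y)
samples = gpr.sample_y(X, n_samples=3, random_state=0)
print(samples.shape)    # (5, 3): one column per posterior draw at the 5 query points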
1
16
def test_regex_includes_scripts_for(gm_manager, url, expected_matches):
    gh_dark_example = textwrap.dedent(r)
    _save_script(gh_dark_example, 'test.user.js')
    gm_manager.load_scripts()

    scripts = gm_manager.scripts_for(QUrl(url))
    assert len(scripts.start + scripts.end + scripts.idle) == expected_matches
tests/unit/javascript/test_greasemonkey.py
96
qutebrowser
{ "docstring": "Ensure our GM @*clude support supports regular expressions.\n // ==UserScript==\n // @include /^https?://((gist|guides|help|raw|status|developer)\\.)?github\\.com/((?!generated_pages\\/preview).)*$/\n // @exclude /https?://github\\.com/foo/\n // @run-at document-start\n // ==/UserScript==\n ", "language": "en", "n_whitespaces": 67, "n_words": 21, "vocab_size": 17 }
21
Python
19
21419c9ef5a90ea36a27afaf2503a57f8f9f8536
test_greasemonkey.py
320,963
12
58
test_regex_includes_scripts_for
https://github.com/qutebrowser/qutebrowser.git
greasemonkey: Don't implicitly load scripts Needed for #7245 and also seems like cleaner code.
39
0
117,467
11
3
19
def cal_predicts_accuracy(char_ops, preds, preds_lod, labels, labels_lod, is_remove_duplicate=False):
    acc_num = 0
    img_num = 0
    for ino in range(len(labels_lod) - 1):
        beg_no = preds_lod[ino]
        end_no = preds_lod[ino + 1]
        preds_text = preds[beg_no:end_no].reshape(-1)
        preds_text = char_ops.decode(preds_text, is_remove_duplicate)

        beg_no = labels_lod[ino]
        end_no = labels_lod[ino + 1]
        labels_text = labels[beg_no:end_no].reshape(-1)
        labels_text = char_ops.decode(labels_text, is_remove_duplicate)
        img_num += 1

        if preds_text == labels_text:
            acc_num += 1
    acc = acc_num * 1.0 / img_num
    return acc, acc_num, img_num
modules/image/text_recognition/ppocrv3_rec_ch/character.py
209
PaddleHub
{ "docstring": "\n Calculate prediction accuracy\n Args:\n char_ops: CharacterOps\n preds: preds result,text index\n preds_lod: lod tensor of preds\n labels: label of input image, text index\n labels_lod: lod tensor of label\n is_remove_duplicate: Whether to remove duplicate characters,\n The default is False\n Return:\n acc: The accuracy of test set\n acc_num: The correct number of samples predicted\n img_num: The total sample number of the test set\n ", "language": "en", "n_whitespaces": 169, "n_words": 60, "vocab_size": 43 }
70
Python
44
9b3119dfb63c4cbb7acfb9f1f1c09ac24e6d68d2
character.py
49,450
17
139
cal_predicts_accuracy
https://github.com/PaddlePaddle/PaddleHub.git
add module
169
0
9,747
12
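A toy sketch of the lod-style slicing in cal_predicts_accuracy, with invented index arrays; decoding is simplified to dropping consecutive duplicate indices rather than calling a real CharacterOps:

import numpy as np
from itertools import groupby

preds = np.array([3, 3, 7, 5, 5, 5])
labels = np.array([3, 7, 5])
preds_lod = [0, 3, 6]                      # sample 0 -> preds[0:3], sample 1 -> preds[3:6]
labels_lod = [0, 2, 3]
correct = 0
for i in range(len(labels_lod) - 1):
    p = [k for k, _ in groupby(preds[preds_lod[i]:preds_lod[i + 1]].tolist())]
    lab = labels[labels_lod[i]:labels_lod[i + 1]].tolist()
    correct += int(p == lab)
print(correct / (len(labels_lod) - 1))     # 1.0 for this toy example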
1
6
def fit_predict(self, X, y=None):
    # As fit_predict would be different from fit.predict, fit_predict is
    # only available for outlier detection (novelty=False)
    return self.fit(X)._predict()
sklearn/neighbors/_lof.py
40
scikit-learn
{ "docstring": "Fit the model to the training set X and return the labels.\n\n **Not available for novelty detection (when novelty is set to True).**\n Label is 1 for an inlier and -1 for an outlier according to the LOF\n score and the contamination parameter.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. to the training samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n Returns -1 for anomalies/outliers and 1 for inliers.\n ", "language": "en", "n_whitespaces": 219, "n_words": 98, "vocab_size": 67 }
23
Python
21
60cc5b596f38d0d236dab34e02c05d98b5a72bad
_lof.py
260,993
2
23
fit_predict
https://github.com/scikit-learn/scikit-learn.git
FEA Fused sparse-dense support for `PairwiseDistancesReduction` (#23585) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Christian Lorentzen <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Meekail Zain <[email protected]>
51
0
76,611
9
3
24
def get_attendance_years() -> str:
    Attendance = frappe.qb.DocType('Attendance')
    year_list = (
        frappe.qb.from_(Attendance)
        .select(Extract('year', Attendance.attendance_date).as_('year'))
        .distinct()
    ).run(as_dict=True)

    if year_list:
        year_list.sort(key=lambda d: d.year, reverse=True)
    else:
        year_list = [getdate().year]

    return "\n".join(cstr(entry.year) for entry in year_list)
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
177
erpnext
{ "docstring": "Returns all the years for which attendance records exist", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
31
Python
28
e79d292233000985a04c5d46859513c1e0d7c88c
monthly_attendance_sheet.py
68,213
13
104
get_attendance_years
https://github.com/frappe/erpnext.git
refactor: Monthly Attendance Sheet - split into smaller functions - add type hints - get rid of unnecessary db calls and loops - add docstrings for functions
19
0
14,743
18
2
10
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, ConvNextModel):
        module.gradient_checkpointing = value


CONVNEXT_START_DOCSTRING = r

CONVNEXT_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
src/transformers/models/convnext/modeling_convnext.py
64
@add_start_docstrings( "The bare ConvNext model outputting raw features without any specific head on top.", CONVNEXT_START_DOCSTRING, )
transformers
{ "docstring": "\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 248, "n_words": 128, "vocab_size": 88 }
32
Python
29
84eec9e6ba55c5aceee2a92fd820fcca4b67c510
modeling_convnext.py
34,876
3
24
_set_gradient_checkpointing
https://github.com/huggingface/transformers.git
Add ConvNeXT (#15277) * First draft * Add conversion script * Improve conversion script * Improve docs and implement tests * Define model output class * Fix tests * Fix more tests * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply more suggestions from code review * Apply suggestions from code review * Rename dims to hidden_sizes * Fix equivalence test * Rename gamma to gamma_parameter * Clean up conversion script * Add ConvNextFeatureExtractor * Add corresponding tests * Implement feature extractor correctly * Make implementation cleaner * Add ConvNextStem class * Improve design * Update design to also include encoder * Fix gamma parameter * Use sample docstrings * Finish conversion, add center cropping * Replace nielsr by facebook, make feature extractor tests smaller * Fix integration test Co-authored-by: Sylvain Gugger <[email protected]>
51
1
6,354
9
1
10
def test_rbf_sampler_gamma_scale():
    X, y = [[0.0], [1.0]], [0, 1]
    rbf = RBFSampler(gamma="scale")
    rbf.fit(X, y)
    assert rbf._gamma == pytest.approx(4)
sklearn/tests/test_kernel_approximation.py
83
scikit-learn
{ "docstring": "Check the inner value computed when `gamma='scale'`.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
18
Python
17
61ae92a7786baa132970cdc69da786f9952d8bda
test_kernel_approximation.py
261,468
5
55
test_rbf_sampler_gamma_scale
https://github.com/scikit-learn/scikit-learn.git
ENH Add gamma='scale' option to RBFSampler (#24755) Co-authored-by: Guillaume Lemaitre <[email protected]>
33
0
76,824
10
3
8
def vocab_size(self) -> int:
    if self.is_category_target:
        return self.model.training_set_metadata[self.target_feature_name]["vocab_size"]
    elif self.is_binary_target:
        return 2
    return 1
ludwig/explain/explainer.py
60
ludwig
{ "docstring": "The vocab size of the target feature.\n\n For regression (number) this is 1, for binary it is 2, and for category it is the vocab size.\n ", "language": "en", "n_whitespaces": 40, "n_words": 26, "vocab_size": 20 }
14
Python
12
1caede3a2da4ec71cb8650c7e45120c26948a5b9
explainer.py
8,246
10
36
vocab_size
https://github.com/ludwig-ai/ludwig.git
Explanation API and feature importance for GBM (#2564) * add docstring for explain_ig * solidify Explainer API * add gbm explainer * add dataclasses for typed explanations * add GBM feature importance * remove unused imports * add tests * fix test * extract explanation into file * rename base to explainer * remove unused kwargs * remove device placement from base explainer * use proper field from gbm
64
0
1,380
11
1
2
def zaxis(self):
    return self["zaxis"]
packages/python/plotly/plotly/graph_objs/layout/_scene.py
22
plotly.py
{ "docstring": "\n The 'zaxis' property is an instance of ZAxis\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.scene.ZAxis`\n - A dict of string/value properties that will be passed\n to the ZAxis constructor\n\n Supported dict properties:\n\n autorange\n Determines whether or not the range of this\n axis is computed in relation to the input data.\n See `rangemode` for more info. If `range` is\n provided, then `autorange` is set to False.\n autotypenumbers\n Using \"strict\" a numeric string in trace data\n is not converted to a number. Using *convert\n types* a numeric string in trace data may be\n treated as a number during automatic axis\n `type` detection. Defaults to\n layout.autotypenumbers.\n backgroundcolor\n Sets the background color of this axis' wall.\n calendar\n Sets the calendar system to use for `range` and\n `tick0` if this is a date axis. This does not\n set the calendar for interpreting data on this\n axis, that's specified in the trace or via the\n global `layout.calendar`\n categoryarray\n Sets the order in which categories on this axis\n appear. Only has an effect if `categoryorder`\n is set to \"array\". Used with `categoryorder`.\n categoryarraysrc\n Sets the source reference on Chart Studio Cloud\n for `categoryarray`.\n categoryorder\n Specifies the ordering logic for the case of\n categorical variables. By default, plotly uses\n \"trace\", which specifies the order that is\n present in the data supplied. Set\n `categoryorder` to *category ascending* or\n *category descending* if order should be\n determined by the alphanumerical order of the\n category names. Set `categoryorder` to \"array\"\n to derive the ordering from the attribute\n `categoryarray`. If a category is not found in\n the `categoryarray` array, the sorting behavior\n for that attribute will be identical to the\n \"trace\" mode. The unspecified categories will\n follow the categories in `categoryarray`. Set\n `categoryorder` to *total ascending* or *total\n descending* if order should be determined by\n the numerical order of the values. Similarly,\n the order can be determined by the min, max,\n sum, mean or median of all the values.\n color\n Sets default for all colors associated with\n this axis all at once: line, font, tick, and\n grid colors. Grid color is lightened by\n blending this with the plot background\n Individual pieces can override this.\n dtick\n Sets the step in-between ticks on this axis.\n Use with `tick0`. Must be a positive number, or\n special strings available to \"log\" and \"date\"\n axes. If the axis `type` is \"log\", then ticks\n are set every 10^(n*dtick) where n is the tick\n number. For example, to set a tick mark at 1,\n 10, 100, 1000, ... set dtick to 1. To set tick\n marks at 1, 100, 10000, ... set dtick to 2. To\n set tick marks at 1, 5, 25, 125, 625, 3125, ...\n set dtick to log_10(5), or 0.69897000433. \"log\"\n has several special values; \"L<f>\", where `f`\n is a positive number, gives ticks linearly\n spaced in value (but not position). For example\n `tick0` = 0.1, `dtick` = \"L0.5\" will put ticks\n at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10\n plus small digits between, use \"D1\" (all\n digits) or \"D2\" (only 2 and 5). `tick0` is\n ignored for \"D1\" and \"D2\". If the axis `type`\n is \"date\", then you must convert the time to\n milliseconds. For example, to set the interval\n between ticks to one day, set `dtick` to\n 86400000.0. 
\"date\" also has special values\n \"M<n>\" gives ticks spaced by a number of\n months. `n` must be a positive integer. To set\n ticks on the 15th of every third month, set\n `tick0` to \"2000-01-15\" and `dtick` to \"M3\". To\n set ticks every 4 years, set `dtick` to \"M48\"\n exponentformat\n Determines a formatting rule for the tick\n exponents. For example, consider the number\n 1,000,000,000. If \"none\", it appears as\n 1,000,000,000. If \"e\", 1e+9. If \"E\", 1E+9. If\n \"power\", 1x10^9 (with 9 in a super script). If\n \"SI\", 1G. If \"B\", 1B.\n gridcolor\n Sets the color of the grid lines.\n gridwidth\n Sets the width (in px) of the grid lines.\n hoverformat\n Sets the hover text formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n linecolor\n Sets the axis line color.\n linewidth\n Sets the width (in px) of the axis line.\n minexponent\n Hide SI prefix for 10^n if |n| is below this\n number. This only has an effect when\n `tickformat` is \"SI\" or \"B\".\n mirror\n Determines if the axis lines or/and ticks are\n mirrored to the opposite side of the plotting\n area. If True, the axis lines are mirrored. If\n \"ticks\", the axis lines and ticks are mirrored.\n If False, mirroring is disable. If \"all\", axis\n lines are mirrored on all shared-axes subplots.\n If \"allticks\", axis lines and ticks are\n mirrored on all shared-axes subplots.\n nticks\n Specifies the maximum number of ticks for the\n particular axis. The actual number of ticks\n will be chosen automatically to be less than or\n equal to `nticks`. Has an effect only if\n `tickmode` is set to \"auto\".\n range\n Sets the range of this axis. If the axis `type`\n is \"log\", then you must take the log of your\n desired range (e.g. to set the range from 1 to\n 100, set the range from 0 to 2). If the axis\n `type` is \"date\", it should be date strings,\n like date data, though Date objects and unix\n milliseconds will be accepted and converted to\n strings. If the axis `type` is \"category\", it\n should be numbers, using the scale where each\n category is assigned a serial number from zero\n in the order it appears.\n rangemode\n If \"normal\", the range is computed in relation\n to the extrema of the input data. If *tozero*`,\n the range extends to 0, regardless of the input\n data If \"nonnegative\", the range is non-\n negative, regardless of the input data. Applies\n only to linear axes.\n separatethousands\n If \"true\", even 4-digit integers are separated\n showaxeslabels\n Sets whether or not this axis is labeled\n showbackground\n Sets whether or not this axis' wall has a\n background color.\n showexponent\n If \"all\", all exponents are shown besides their\n significands. If \"first\", only the exponent of\n the first tick is shown. If \"last\", only the\n exponent of the last tick is shown. 
If \"none\",\n no exponents appear.\n showgrid\n Determines whether or not grid lines are drawn.\n If True, the grid lines are drawn at every tick\n mark.\n showline\n Determines whether or not a line bounding this\n axis is drawn.\n showspikes\n Sets whether or not spikes starting from data\n points to this axis' wall are shown on hover.\n showticklabels\n Determines whether or not the tick labels are\n drawn.\n showtickprefix\n If \"all\", all tick labels are displayed with a\n prefix. If \"first\", only the first tick is\n displayed with a prefix. If \"last\", only the\n last tick is displayed with a suffix. If\n \"none\", tick prefixes are hidden.\n showticksuffix\n Same as `showtickprefix` but for tick suffixes.\n spikecolor\n Sets the color of the spikes.\n spikesides\n Sets whether or not spikes extending from the\n projection data points to this axis' wall\n boundaries are shown on hover.\n spikethickness\n Sets the thickness (in px) of the spikes.\n tick0\n Sets the placement of the first tick on this\n axis. Use with `dtick`. If the axis `type` is\n \"log\", then you must take the log of your\n starting tick (e.g. to set the starting tick to\n 100, set the `tick0` to 2) except when\n `dtick`=*L<f>* (see `dtick` for more info). If\n the axis `type` is \"date\", it should be a date\n string, like date data. If the axis `type` is\n \"category\", it should be a number, using the\n scale where each category is assigned a serial\n number from zero in the order it appears.\n tickangle\n Sets the angle of the tick labels with respect\n to the horizontal. For example, a `tickangle`\n of -90 draws the tick labels vertically.\n tickcolor\n Sets the tick color.\n tickfont\n Sets the tick font.\n tickformat\n Sets the tick label formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n tickformatstops\n A tuple of :class:`plotly.graph_objects.layout.\n scene.zaxis.Tickformatstop` instances or dicts\n with compatible properties\n tickformatstopdefaults\n When used in a template (as layout.template.lay\n out.scene.zaxis.tickformatstopdefaults), sets\n the default property values to use for elements\n of layout.scene.zaxis.tickformatstops\n ticklen\n Sets the tick length (in px).\n tickmode\n Sets the tick mode for this axis. If \"auto\",\n the number of ticks is set via `nticks`. If\n \"linear\", the placement of the ticks is\n determined by a starting position `tick0` and a\n tick step `dtick` (\"linear\" is the default\n value if `tick0` and `dtick` are provided). If\n \"array\", the placement of the ticks is set via\n `tickvals` and the tick text is `ticktext`.\n (\"array\" is the default value if `tickvals` is\n provided).\n tickprefix\n Sets a tick label prefix.\n ticks\n Determines whether ticks are drawn or not. If\n \"\", this axis' ticks are not drawn. If\n \"outside\" (\"inside\"), this axis' are drawn\n outside (inside) the axis lines.\n ticksuffix\n Sets a tick label suffix.\n ticktext\n Sets the text displayed at the ticks position\n via `tickvals`. Only has an effect if\n `tickmode` is set to \"array\". 
Used with\n `tickvals`.\n ticktextsrc\n Sets the source reference on Chart Studio Cloud\n for `ticktext`.\n tickvals\n Sets the values at which ticks on this axis\n appear. Only has an effect if `tickmode` is set\n to \"array\". Used with `ticktext`.\n tickvalssrc\n Sets the source reference on Chart Studio Cloud\n for `tickvals`.\n tickwidth\n Sets the tick width (in px).\n title\n :class:`plotly.graph_objects.layout.scene.zaxis\n .Title` instance or dict with compatible\n properties\n titlefont\n Deprecated: Please use\n layout.scene.zaxis.title.font instead. Sets\n this axis' title font. Note that the title's\n font used to be customized by the now\n deprecated `titlefont` attribute.\n type\n Sets the axis type. By default, plotly attempts\n to determined the axis type by looking into the\n data of the traces that referenced the axis in\n question.\n visible\n A single toggle to hide the axis while\n preserving interaction like dragging. Default\n is true when a cheater plot is present on the\n axis, otherwise false\n zeroline\n Determines whether or not a line is drawn at\n along the 0 value of this axis. If True, the\n zero line is drawn on top of the grid lines.\n zerolinecolor\n Sets the line color of the zero line.\n zerolinewidth\n Sets the width (in px) of the zero line.\n\n Returns\n -------\n plotly.graph_objs.layout.scene.ZAxis\n ", "language": "en", "n_whitespaces": 7328, "n_words": 1773, "vocab_size": 608 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_scene.py
231,647
2
11
zaxis
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,091
7
1
5
def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:
    return internal_get_global_client().get_deployment_statuses()
python/ray/serve/api.py
35
ray
{ "docstring": "Returns a dictionary of deployment statuses.\n\n A deployment's status is one of {UPDATING, UNHEALTHY, and HEALTHY}.\n\n Example:\n >>> from ray.serve.api import get_deployment_statuses\n >>> statuses = get_deployment_statuses() # doctest: +SKIP\n >>> status_info = statuses[\"deployment_name\"] # doctest: +SKIP\n >>> status = status_info.status # doctest: +SKIP\n >>> message = status_info.message # doctest: +SKIP\n\n Returns:\n Dict[str, DeploymentStatus]: This dictionary maps the running\n deployment's name to a DeploymentStatus object containing its\n status and a message explaining the status.\n ", "language": "en", "n_whitespaces": 141, "n_words": 73, "vocab_size": 47 }
7
Python
7
60054995e65304fb14e6d0ab69bdec07aa9389fe
api.py
147,381
18
20
get_deployment_statuses
https://github.com/ray-project/ray.git
[docs] fix doctests and activate CI (#23418)
13
0
33,930
9
15
7
def lexicographical_topological_sort(G, key=None):
    if not G.is_directed():
        msg = "Topological sort not defined on undirected graphs."
        raise nx.NetworkXError(msg)

    if key is None:
networkx/algorithms/dag.py
58
networkx
{ "docstring": "Generate the nodes in the unique lexicographical topological sort order.\n\n Generates a unique ordering of nodes by first sorting topologically (for which there are often\n multiple valid orderings) and then additionally by sorting lexicographically.\n\n A topological sort arranges the nodes of a directed graph so that the\n upstream node of each directed edge precedes the downstream node.\n It is always possible to find a solution for directed graphs that have no cycles.\n There may be more than one valid solution.\n\n Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the\n topological sort and to determine a single, unique ordering. This can be useful in comparing\n sort results.\n\n The lexicographical order can be customized by providing a function to the `key=` parameter.\n The definition of the key function is the same as used in python's built-in `sort()`.\n The function takes a single argument and returns a key to use for sorting purposes.\n\n Lexicographical sorting can fail if the node names are un-sortable. See the example below.\n The solution is to provide a function to the `key=` argument that returns sortable keys.\n\n\n Parameters\n ----------\n G : NetworkX digraph\n A directed acyclic graph (DAG)\n\n key : function, optional\n A function of one argument that converts a node name to a comparison key.\n It defines and resolves ambiguities in the sort order. Defaults to the identity function.\n\n Yields\n ------\n nodes\n Yields the nodes of G in lexicographical topological sort order.\n\n Raises\n ------\n NetworkXError\n Topological sort is defined for directed graphs only. If the graph `G`\n is undirected, a :exc:`NetworkXError` is raised.\n\n NetworkXUnfeasible\n If `G` is not a directed acyclic graph (DAG) no topological sort exists\n and a :exc:`NetworkXUnfeasible` exception is raised. This can also be\n raised if `G` is changed while the returned iterator is being processed\n\n RuntimeError\n If `G` is changed while the returned iterator is being processed.\n\n TypeError\n Results from un-sortable node names.\n Consider using `key=` parameter to resolve ambiguities in the sort order.\n\n Examples\n --------\n >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)])\n >>> list(nx.lexicographical_topological_sort(DG))\n [2, 1, 3, 5, 4]\n >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x))\n [2, 5, 1, 4, 3]\n\n The sort will fail for any graph with integer and string nodes. Comparison of integer to strings\n is not defined in python. Is 3 greater or less than 'red'?\n\n >>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')])\n >>> list(nx.lexicographical_topological_sort(DG))\n Traceback (most recent call last):\n ...\n TypeError: '<' not supported between instances of 'str' and 'int'\n ...\n\n Incomparable nodes can be resolved using a `key` function. This example function\n allows comparison of integers and strings by returning a tuple where the first\n element is True for `str`, False otherwise. 
The second element is the node name.\n This groups the strings and integers separately so they can be compared only among themselves.\n\n >>> key = lambda node: (isinstance(node, str), node)\n >>> list(nx.lexicographical_topological_sort(DG, key=key))\n [1, 2, 3, 'blue', 'green', 'red']\n\n Notes\n -----\n This algorithm is based on a description and proof in\n \"Introduction to Algorithms: A Creative Approach\" [1]_ .\n\n See also\n --------\n topological_sort\n\n References\n ----------\n .. [1] Manber, U. (1989).\n *Introduction to Algorithms - A Creative Approach.* Addison-Wesley.\n ", "language": "en", "n_whitespaces": 802, "n_words": 528, "vocab_size": 279 }
21
Python
19
99a925f695080787d077f620972c6552c4b0b4ba
dag.py
177,149
32
212
lexicographical_topological_sort
https://github.com/networkx/networkx.git
docstring update to lexicographical_topological_sort issue 5681 (#5930) * docstring update to lex-topo-sort - explain effect and purpose for lexi sort - add hints for fixing non-sortable nodes - add hint to exception msg - Add examples * Shorten the first line of the doc_string Co-authored-by: Dan Schult <[email protected]> * Generalize the description of sort failures Co-authored-by: Dan Schult <[email protected]> * more succinct description of key function Co-authored-by: Dan Schult <[email protected]> * improve description of key function Co-authored-by: Dan Schult <[email protected]> * Black'd it. Co-authored-by: Dan Schult <[email protected]>
44
0
42,289
9
1
17
def test_run_summarization_no_trainer(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f.split()

    run_command(self._launch_args + testargs)
    result = get_results(tmp_dir)
    self.assertGreaterEqual(result["eval_rouge1"], 10)
    self.assertGreaterEqual(result["eval_rouge2"], 2)
    self.assertGreaterEqual(result["eval_rougeL"], 7)
    self.assertGreaterEqual(result["eval_rougeLsum"], 7)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
examples/pytorch/test_accelerate_examples.py
213
transformers
{ "docstring": "\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ", "language": "en", "n_whitespaces": 157, "n_words": 17, "vocab_size": 16 }
26
Python
22
99eb9b523f9b9ea6096323ce5610ce6633acc88a
test_accelerate_examples.py
32,333
24
122
test_run_summarization_no_trainer
https://github.com/huggingface/transformers.git
Fix `no_trainer` CI (#18242) * Fix all tests
95
0
5,907
12
3
21
def test_views(self, postgres_db):
    query = 

    for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
        self.sql_via_http(
            query.format(f'test_view_{char}', char),
            company_id=cid,
            expected_resp_type=RESPONSE_TYPE.OK
        )

        tables = self.get_tables_in('mindsdb', cid)
        self.assert_list(
            tables, {
                'models',
                'models_versions',
                f'test_view_{char}'
            }
        )

    for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
        response = self.sql_via_http(
            f"select * from mindsdb.test_view_{char}",
            company_id=cid,
            expected_resp_type=RESPONSE_TYPE.TABLE
        )
        assert len(response['data']) == 50

        response = self.sql_via_http(
            f"DROP VIEW mindsdb.test_view_{char}",
            company_id=cid,
            expected_resp_type=RESPONSE_TYPE.OK
        )

        tables = self.get_tables_in('mindsdb', cid)
        self.assert_list(
            tables, {
                'models',
                'models_versions'
            }
        )

        self.sql_via_http(
            f"select * from mindsdb.test_view_{char}",
            company_id=cid,
            expected_resp_type=RESPONSE_TYPE.ERROR
        )
tests/integration_tests/flows/test_company_independent.py
309
mindsdb
{ "docstring": "\n CREATE VIEW mindsdb.{}\n FROM test_integration_{} (\n select * from rentals limit 50\n )\n ", "language": "en", "n_whitespaces": 69, "n_words": 13, "vocab_size": 13 }
81
Python
43
b96825c643cb2ce062d80868a5b7824d99bca07f
test_company_independent.py
118,187
54
200
test_views
https://github.com/mindsdb/mindsdb.git
fix tests
602
0
26,187
13
7
27
def plot(*args, show=True, **kwargs):
    args = list(map(sympify, args))
    free = set()
    for a in args:
        if isinstance(a, Expr):
            free |= a.free_symbols
            if len(free) > 1:
                raise ValueError(
                    'The same variable should be used in all '
                    'univariate expressions being plotted.')
    x = free.pop() if free else Symbol('x')
    kwargs.setdefault('xlabel', x)
    kwargs.setdefault('ylabel', Function('f')(x))
    series = []
    plot_expr = check_arguments(args, 1, 1)
    series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]

    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
sympy/plotting/plot.py
246
sympy
{ "docstring": "Plots a function of a single variable as a curve.\n\n Parameters\n ==========\n\n args :\n The first argument is the expression representing the function\n of single variable to be plotted.\n\n The last argument is a 3-tuple denoting the range of the free\n variable. e.g. ``(x, 0, 5)``\n\n Typical usage examples are in the followings:\n\n - Plotting a single expression with a single range.\n ``plot(expr, range, **kwargs)``\n - Plotting a single expression with the default range (-10, 10).\n ``plot(expr, **kwargs)``\n - Plotting multiple expressions with a single range.\n ``plot(expr1, expr2, ..., range, **kwargs)``\n - Plotting multiple expressions with multiple ranges.\n ``plot((expr1, range1), (expr2, range2), ..., **kwargs)``\n\n It is best practice to specify range explicitly because default\n range may change in the future if a more advanced default range\n detection algorithm is implemented.\n\n show : bool, optional\n The default value is set to ``True``. Set show to ``False`` and\n the function will not display the plot. The returned instance of\n the ``Plot`` class can then be used to save or display the plot\n by calling the ``save()`` and ``show()`` methods respectively.\n\n line_color : string, or float, or function, optional\n Specifies the color for the plot.\n See ``Plot`` to see how to set color for the plots.\n Note that by setting ``line_color``, it would be applied simultaneously\n to all the series.\n\n title : str, optional\n Title of the plot. It is set to the latex representation of\n the expression, if the plot has only one expression.\n\n label : str, optional\n The label of the expression in the plot. It will be used when\n called with ``legend``. Default is the name of the expression.\n e.g. ``sin(x)``\n\n xlabel : str or expression, optional\n Label for the x-axis.\n\n ylabel : str or expression, optional\n Label for the y-axis.\n\n xscale : 'linear' or 'log', optional\n Sets the scaling of the x-axis.\n\n yscale : 'linear' or 'log', optional\n Sets the scaling of the y-axis.\n\n axis_center : (float, float), optional\n Tuple of two floats denoting the coordinates of the center or\n {'center', 'auto'}\n\n xlim : (float, float), optional\n Denotes the x-axis limits, ``(min, max)```.\n\n ylim : (float, float), optional\n Denotes the y-axis limits, ``(min, max)```.\n\n annotations : list, optional\n A list of dictionaries specifying the type of annotation\n required. The keys in the dictionary should be equivalent\n to the arguments of the matplotlib's annotate() function.\n\n markers : list, optional\n A list of dictionaries specifying the type the markers required.\n The keys in the dictionary should be equivalent to the arguments\n of the matplotlib's plot() function along with the marker\n related keyworded arguments.\n\n rectangles : list, optional\n A list of dictionaries specifying the dimensions of the\n rectangles to be plotted. The keys in the dictionary should be\n equivalent to the arguments of the matplotlib's\n patches.Rectangle class.\n\n fill : dict, optional\n A dictionary specifying the type of color filling required in\n the plot. The keys in the dictionary should be equivalent to the\n arguments of the matplotlib's fill_between() function.\n\n adaptive : bool, optional\n The default value is set to ``True``. Set adaptive to ``False``\n and specify ``nb_of_points`` if uniform sampling is required.\n\n The plotting uses an adaptive algorithm which samples\n recursively to accurately plot. 
The adaptive algorithm uses a\n random point near the midpoint of two points that has to be\n further sampled. Hence the same plots can appear slightly\n different.\n\n depth : int, optional\n Recursion depth of the adaptive algorithm. A depth of value\n ``n`` samples a maximum of `2^{n}` points.\n\n If the ``adaptive`` flag is set to ``False``, this will be\n ignored.\n\n nb_of_points : int, optional\n Used when the ``adaptive`` is set to ``False``. The function\n is uniformly sampled at ``nb_of_points`` number of points.\n\n If the ``adaptive`` flag is set to ``True``, this will be\n ignored.\n\n size : (float, float), optional\n A tuple in the form (width, height) in inches to specify the size of\n the overall figure. The default value is set to ``None``, meaning\n the size will be set by the default backend.\n\n Examples\n ========\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> from sympy import symbols\n >>> from sympy.plotting import plot\n >>> x = symbols('x')\n\n Single Plot\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot(x**2, (x, -5, 5))\n Plot object containing:\n [0]: cartesian line: x**2 for x over (-5.0, 5.0)\n\n Multiple plots with single range.\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot(x, x**2, x**3, (x, -5, 5))\n Plot object containing:\n [0]: cartesian line: x for x over (-5.0, 5.0)\n [1]: cartesian line: x**2 for x over (-5.0, 5.0)\n [2]: cartesian line: x**3 for x over (-5.0, 5.0)\n\n Multiple plots with different ranges.\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))\n Plot object containing:\n [0]: cartesian line: x**2 for x over (-6.0, 6.0)\n [1]: cartesian line: x for x over (-5.0, 5.0)\n\n No adaptive sampling.\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot(x**2, adaptive=False, nb_of_points=400)\n Plot object containing:\n [0]: cartesian line: x**2 for x over (-10.0, 10.0)\n\n See Also\n ========\n\n Plot, LineOver1DRangeSeries\n\n ", "language": "en", "n_whitespaces": 1639, "n_words": 831, "vocab_size": 317 }
76
Python
59
1473b1782d0e440c17ee0ce6283bff0aa7f515af
plot.py
197,253
20
148
plot
https://github.com/sympy/sympy.git
Use LaTeX for labels in matplotlib backend
204
0
48,412
15
1
4
def setcbreak(filehandle): set_console_mode(filehandle, CBREAK_MODE)
src/textual/drivers/win32.py
22
textual
{ "docstring": "\n Args:\n filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`\n\n Raises:\n OSError: Error calling Windows API\n\n Convenience function which mimics :py:func:`tty.setcbreak` behavior\n\n All console input options are disabled except ``ENABLE_PROCESSED_INPUT``\n and, if supported, ``ENABLE_VIRTUAL_TERMINAL_INPUT``\n ", "language": "en", "n_whitespaces": 66, "n_words": 33, "vocab_size": 32 }
4
Python
4
54e63428644710112215c4f2d27cd64daeeda6fa
win32.py
182,009
2
12
setcbreak
https://github.com/Textualize/textual.git
windows driver
10
0
43,728
7
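A minimal usage sketch for the setcbreak helper in the record above, assuming a Windows process, the standard-library msvcrt module (the record's own docstring points at msvcrt.get_osfhandle), and that the module is importable as textual.drivers.win32; set_console_mode and CBREAK_MODE are internals of the driver shown.

import sys
import msvcrt  # standard library, Windows only

from textual.drivers.win32 import setcbreak  # import path inferred from the record's file path

# Put the interactive stdin console into cbreak mode, mirroring tty.setcbreak.
stdin_handle = msvcrt.get_osfhandle(sys.stdin.fileno())
setcbreak(stdin_handle)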
1
9
def test_pandas_arff_parser_strip_double_quotes(parser_func): pd = pytest.importorskip("pandas") arff_file = BytesIO( textwrap.dedent(
sklearn/datasets/tests/test_arff_parser.py
39
arff_file = BytesIO( textwrap.dedent( """
scikit-learn
{ "docstring": "Check that we properly strip double quotes from the data.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
9
Python
8
8515b486810e844bc7f5f1a4fb2227405d46871e
test_arff_parser.py
260,158
54
186
test_pandas_arff_parser_strip_double_quotes
https://github.com/scikit-learn/scikit-learn.git
FIX make pandas and liac arff parser quoting behaviour closer (#23497) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Loïc Estève <[email protected]>
37
1
76,098
9
6
10
def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs): if ie is not None: kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key() if video_id is not None: kwargs['id'] = video_id if video_title is not None: kwargs['title'] = video_title return { **kwargs, '_type': 'url_transparent' if url_transparent else 'url', 'url': url, }
yt_dlp/extractor/common.py
151
yt-dlp
{ "docstring": "Returns a URL that points to a page that should be processed", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
49
Python
33
311b6615d85d3530f2709c50e4223ff3b6b14361
common.py
162,332
12
94
url_result
https://github.com/yt-dlp/yt-dlp.git
[extractor] Improve `url_result` and related
157
0
39,190
11
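Hedged illustration of what the url_result helper above produces; the dict in the comment is traced directly from the function body shown, while the bare call style is an assumption (the record does not show whether the function is bound as a method or a static helper).

result = url_result('https://example.com/video', video_id='abc123',
                    video_title='Example clip', url_transparent=True)
# Traced through the body above:
# {'id': 'abc123', 'title': 'Example clip',
#  '_type': 'url_transparent', 'url': 'https://example.com/video'}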
4
12
def stamp(self, visitor=None, **headers): headers = headers.copy() if visitor is not None: headers.update(visitor.on_signature(self, **headers)) else: headers["stamped_headers"] = [header for header in headers.keys() if header not in self.options] _merge_dictionaries(headers, self.options) return self.set(**headers)
celery/canvas.py
130
celery
{ "docstring": "Apply this task asynchronously.\n\n Arguments:\n visitor (StampingVisitor): Visitor API object.\n headers (Dict): Stamps that should be added to headers.\n ", "language": "en", "n_whitespaces": 55, "n_words": 19, "vocab_size": 19 }
31
Python
26
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
canvas.py
208,054
8
81
stamp
https://github.com/celery/celery.git
Canvas Header Stamping (#7384) * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Redo header stamping (#7341) * _freeze_gid dict merge fixed * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. 
* Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <[email protected]> * Added stamping mechanism * Manual stamping improved * flake8 fixed * Added subtests * Add comma. * Moved groups to stamps * Fixed chord and added test for that * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * Added test for simple test for chord and fixed chord implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * Fixed lint and elements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * type -> isinstance * Added stamping mechanism * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Manual stamping improved * fail_ci_if_error uncommented * flake8 fixed * Added subtests * Changes * Add comma. * Fixed chord and added test for that * canvas.py fixed * Test chord.py fixed * Fixed stamped_headers * collections import fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * collections import fixed * Update celery/backends/base.py Co-authored-by: Omer Katz <[email protected]> * ampq.py fixed * Refrain from using deprecated import path. * Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet. 
* Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed issues with maybe_list. Add documentation * Fixed potential issue with integration tests * Fixed issues with _regen * Fixed issues with _regen * Fixed test_generator issues * Fixed _regen stamping * Fixed _regen stamping * Fixed TimeOut issue * Fixed TimeOut issue * Fixed TimeOut issue * Update docs/userguide/canvas.rst Co-authored-by: Omer Katz <[email protected]> * Fixed Couchbase * Better stamping intro * New GroupVisitor example * Adjust documentation. Co-authored-by: Naomi Elstein <[email protected]> Co-authored-by: Omer Katz <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Asif Saif Uddin <[email protected]> Co-authored-by: Omer Katz <[email protected]>
99
0
52,183
13
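A minimal sketch of the no-visitor branch of stamp() above; the task name and header key are hypothetical, and this is not a full tour of the Stamping API described in the commit message.

from celery.canvas import Signature

sig = Signature('tasks.add', args=(2, 2))    # hypothetical task name
sig.stamp(on_behalf_of='billing-service')    # hypothetical stamp header
# With visitor=None, the body above records ['on_behalf_of'] under
# 'stamped_headers' and merges both keys into sig.options via set().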
1
3
def input_specs_inference(self) -> ModelSpec: return ModelSpec()
rllib/core/rl_module/rl_module.py
23
ray
{ "docstring": "Returns the input specs of the forward_inference method.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
6
Python
6
37de814a8598e0ea3dea23d5ae0caf9df54fa0e6
rl_module.py
134,355
3
12
input_specs_inference
https://github.com/ray-project/ray.git
[RLlib] RLModule base class, RLModule PR 3/N (#29642) Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
20
0
30,267
7
1
13
def test_payment_refund_or_void_void_called(void_mock, payment): # given payment.can_void = Mock(return_value=True) assert payment.can_void() is True payment.transactions.count() == 0 # when gateway.payment_refund_or_void(payment, get_plugins_manager(), None) # then assert void_mock.called_once() @patch("saleor.payment.gateway.void")
saleor/payment/tests/test_gateway.py
101
@patch("saleor.payment.gateway.void")
saleor
{ "docstring": "Ensure that the refund method is called when payment can be voided\n and there is no void transaction for given payment.", "language": "en", "n_whitespaces": 23, "n_words": 21, "vocab_size": 20 }
25
Python
22
0881beec1ac02dfa97525c5173687defb356d85c
test_gateway.py
26,688
6
53
test_payment_refund_or_void_void_called
https://github.com/saleor/saleor.git
Fix payment flow (#9504) * Do not capture payment again when it should be refunded or voided * Do not create order when then is ongoing refund
51
1
5,047
9
2
11
def idxmin(self, axis=0, skipna=True, *args, **kwargs): i = self.argmin(axis, skipna, *args, **kwargs) if i == -1: return np.nan return self.index[i]
pandas/core/series.py
80
pandas
{ "docstring": "\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : {0 or 'index'}\n Unused. Parameter needed for compatibility with DataFrame.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the minimum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmin : Return indices of the minimum values\n along the given axis.\n DataFrame.idxmin : Return index of first occurrence of minimum\n over requested axis.\n Series.idxmax : Return index *label* of the first occurrence\n of maximum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``. This method\n returns the label of the minimum, while ``ndarray.argmin`` returns\n the position. To get the position, use ``series.values.argmin()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 1],\n ... index=['A', 'B', 'C', 'D'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 1.0\n dtype: float64\n\n >>> s.idxmin()\n 'A'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmin(skipna=False)\n nan\n ", "language": "en", "n_whitespaces": 631, "n_words": 207, "vocab_size": 132 }
20
Python
17
244f747bb63f45c1c439193f0672c6162853b168
series.py
166,614
5
53
idxmin
https://github.com/pandas-dev/pandas.git
make series axis parameter docs consistent (#47109) * make series docs consistent add series unused param info to DF docs * fix trailing whitespace * fix docs build * add unused * add or update docs for all series methods * small fix * fix line length * fix param order * fix param order * add * add backticks to None and fix space Co-authored-by: uncjackg <[email protected]>
59
0
39,843
9
1
15
def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None)
mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py
143
mindsdb
{ "docstring": "\n SELECT target\n from {self.test_model_1}\n WHERE sqft=100\n ", "language": "en", "n_whitespaces": 47, "n_words": 6, "vocab_size": 6 }
24
Python
21
b999051fd8153a1d3624471cac5483867116f985
test_lightwood_handler.py
116,582
12
83
test_04_query_predictor_single_where_condition
https://github.com/mindsdb/mindsdb.git
test fix
73
0
25,781
11
1
19
def test_archive_too_large_for_disk_cache(self, cache_getfile): release = Release.objects.create(version="1", organization_id=self.project.organization_id) self._create_archive(release, "foo") # cache.getfile is only called for index, not for the archive with override_options({"releasefile.cache-max-archive-size": 9}): result = fetch_release_archive_for_url(release, dist=None, url="foo") assert result is not None result.close() assert len(cache_getfile.mock_calls) == 1
tests/sentry/lang/javascript/test_processor.py
134
sentry
{ "docstring": "ReleaseFile.cache is not used if the archive is too large", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
38
Python
32
8cdaa4e86e8296cdbc145f2a53d3eb38cb7a1c2b
test_processor.py
90,777
8
79
test_archive_too_large_for_disk_cache
https://github.com/getsentry/sentry.git
ref: close files explicitly in tests.sentry.lang.javascript.test_processor (#35262)
105
0
18,688
12
3
6
def _is_current_explicit_device(device_type): device_type = device_type.upper() if device_type not in ["CPU", "GPU"]: raise ValueError('`device_type` should be either "CPU" or "GPU".') device = _get_current_tf_device() return device is not None and device.device_type == device_type.upper()
keras/backend.py
85
keras
{ "docstring": "Check if the current device is explicitly set on the device type specified.\n\n Args:\n device_type: A string containing `GPU` or `CPU` (case-insensitive).\n\n Returns:\n A boolean indicating if the current device scope is explicitly set on the\n device type.\n\n Raises:\n ValueError: If the `device_type` string indicates an unsupported device.\n ", "language": "en", "n_whitespaces": 88, "n_words": 48, "vocab_size": 33 }
31
Python
26
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,492
6
48
_is_current_explicit_device
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
53
0
80,124
10
8
17
def _promote_fields(dt1, dt2): # Both must be structured and have the same names in the same order if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names: raise TypeError("invalid type promotion") new_fields = [] for name in dt1.names: field1 = dt1.fields[name] field2 = dt2.fields[name] new_descr = promote_types(field1[0], field2[0]) # Check that the titles match (if given): if field1[2:] != field2[2:]: raise TypeError("invalid type promotion") if len(field1) == 2: new_fields.append((name, new_descr)) else: new_fields.append(((field1[2], name), new_descr)) return dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
numpy/core/_internal.py
231
numpy
{ "docstring": " Perform type promotion for two structured dtypes.\n\n Parameters\n ----------\n dt1 : structured dtype\n First dtype.\n dt2 : structured dtype\n Second dtype.\n\n Returns\n -------\n out : dtype\n The promoted dtype\n\n Notes\n -----\n If one of the inputs is aligned, the result will be. The titles of\n both descriptors must match (point to the same field).\n ", "language": "en", "n_whitespaces": 113, "n_words": 54, "vocab_size": 42 }
81
Python
62
a0c2e826738daa0cbd83aba85852405b73878f5b
_internal.py
160,283
15
147
_promote_fields
https://github.com/numpy/numpy.git
API: Fix structured dtype cast-safety, promotion, and comparison This PR replaces the old gh-15509 implementing proper type promotion for structured voids. It further fixes the casting safety to consider casts with equivalent field number and matching order as "safe" and if the names, titles, and offsets match as "equiv". The change perculates into the void comparison, and since it fixes the order, it removes the current FutureWarning there as well. This addresses https://github.com/liberfa/pyerfa/issues/77 and replaces gh-15509 (the implementation has changed too much). Fixes gh-15494 (and probably a few more) Co-authored-by: Allan Haldane <[email protected]>
188
0
38,591
15
1
29
def test_read_video_from_file_rescale_width_and_height(self, test_video): # video related width, height, min_dimension, max_dimension = 320, 240, 0, 0 video_start_pts, video_end_pts = 0, -1 video_timebase_num, video_timebase_den = 0, 1 # audio related samples, channels = 0, 0 audio_start_pts, audio_end_pts = 0, -1 audio_timebase_num, audio_timebase_den = 0, 1 full_path = os.path.join(VIDEO_DIR, test_video) tv_result = torch.ops.video_reader.read_video_from_file( full_path, SEEK_FRAME_MARGIN, 0, # getPtsOnly 1, # readVideoStream width, height, min_dimension, max_dimension, video_start_pts, video_end_pts, video_timebase_num, video_timebase_den, 1, # readAudioStream samples, channels, audio_start_pts, audio_end_pts, audio_timebase_num, audio_timebase_den, ) assert tv_result[0].size(1) == height assert tv_result[0].size(2) == width
test/test_video_reader.py
208
vision
{ "docstring": "\n Test the case when decoder starts with a video file to decode frames, and\n both video height and width are set.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
84
Python
52
c50d48845f7b1ca86d6a3b7f37a59be0ae11e36b
test_video_reader.py
192,311
31
145
test_read_video_from_file_rescale_width_and_height
https://github.com/pytorch/vision.git
Improve test_video_reader (#5498) * Improve test_video_reader * Fix linter error
394
0
46,880
10
1
20
def test_queries(self): sql = "SELECT 1" + connection.features.bare_select_suffix with connection.cursor() as cursor: reset_queries() cursor.execute(sql) self.assertEqual(1, len(connection.queries)) self.assertIsInstance(connection.queries, list) self.assertIsInstance(connection.queries[0], dict) self.assertEqual(list(connection.queries[0]), ["sql", "time"]) self.assertEqual(connection.queries[0]["sql"], sql) reset_queries() self.assertEqual(0, len(connection.queries)) sql = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % ( connection.introspection.identifier_converter("backends_square"), connection.ops.quote_name("root"), connection.ops.quote_name("square"), ) with connection.cursor() as cursor: cursor.executemany(sql, [(1, 1), (2, 4)]) self.assertEqual(1, len(connection.queries)) self.assertIsInstance(connection.queries, list) self.assertIsInstance(connection.queries[0], dict) self.assertEqual(list(connection.queries[0]), ["sql", "time"]) self.assertEqual(connection.queries[0]["sql"], "2 times: %s" % sql) # Unfortunately with sqlite3 the in-memory test database cannot be closed.
tests/backends/tests.py
421
django
{ "docstring": "\n Test the documented API of connection.queries.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
79
Python
58
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
201,818
24
257
test_queries
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
274
0
50,001
11
3
14
def b16decode(s, casefold=False): s = _bytes_from_decode_data(s) if casefold: s = s.upper() if re.search(b'[^0-9A-F]', s): raise binascii.Error('Non-base16 digit found') return binascii.unhexlify(s) # # Ascii85 encoding/decoding # _a85chars = None _a85chars2 = None _A85START = b"<~" _A85END = b"~>"
python3.10.4/Lib/base64.py
113
XX-Net
{ "docstring": "Decode the Base16 encoded bytes-like object or ASCII string s.\n\n Optional casefold is a flag specifying whether a lowercase alphabet is\n acceptable as input. For security purposes, the default is False.\n\n The result is returned as a bytes object. A binascii.Error is raised if\n s is incorrectly padded or if there are non-alphabet characters present\n in the input.\n ", "language": "en", "n_whitespaces": 78, "n_words": 58, "vocab_size": 45 }
37
Python
27
8198943edd73a363c266633e1aa5b2a9e9c9f526
base64.py
221,084
7
51
b16decode
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
59
0
56,192
10
2
15
def timezone_tag(parser, token): bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'%s' takes one argument (timezone)" % bits[0]) tz = parser.compile_filter(bits[1]) nodelist = parser.parse(("endtimezone",)) parser.delete_first_token() return TimezoneNode(nodelist, tz) @register.tag("get_current_timezone")
django/templatetags/tz.py
126
@register.tag("get_current_timezone")
django
{ "docstring": "\n Enable a given time zone just for this block.\n\n The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a\n time zone name, or ``None``. If it is ``None``, the default time zone is\n used within the block.\n\n Sample usage::\n\n {% timezone \"Europe/Paris\" %}\n It is {{ now }} in Paris.\n {% endtimezone %}\n ", "language": "en", "n_whitespaces": 99, "n_words": 55, "vocab_size": 43 }
29
Python
27
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tz.py
206,336
8
67
timezone_tag
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
56
1
51,491
11
4
15
def rich_text_style(self) -> Style: # TODO: Feels like there may be opportunity for caching here. background = Color(0, 0, 0, 0) color = Color(255, 255, 255, 0) style = Style() for node in reversed(self.ancestors): styles = node.styles if styles.has_rule("background"): background += styles.background if styles.has_rule("color"): color = styles.color style += styles.text_style style = Style(bgcolor=background.rich_color, color=color.rich_color) + style return style
src/textual/dom.py
165
textual
{ "docstring": "Get the text style object.\n\n A widget's style is influenced by its parent. For instance if a widgets background has an alpha,\n then its parent's background color will show throw. Additionally, widgets will inherit their\n parent's text style (i.e. bold, italic etc).\n\n Returns:\n Style: Rich Style object.\n ", "language": "en", "n_whitespaces": 93, "n_words": 47, "vocab_size": 38 }
58
Python
41
4090d351684342b8e28ef9d5451c7c821e18d1ae
dom.py
183,097
22
103
rich_text_style
https://github.com/Textualize/textual.git
new layout
188
0
44,049
11
11
37
def to_numpy_recarray(G, nodelist=None, dtype=None, order=None): import numpy as np if dtype is None: dtype = [("weight", float)] if nodelist is None: nodelist = list(G) nodeset = G nlen = len(G) else: nlen = len(nodelist) nodeset = set(G.nbunch_iter(nodelist)) if nlen != len(nodeset): for n in nodelist: if n not in G: raise nx.NetworkXError(f"Node {n} in nodelist is not in G") raise nx.NetworkXError("nodelist contains duplicates.") undirected = not G.is_directed() index = dict(zip(nodelist, range(nlen))) M = np.zeros((nlen, nlen), dtype=dtype, order=order) names = M.dtype.names for u, v, attrs in G.edges(data=True): if (u in nodeset) and (v in nodeset): i, j = index[u], index[v] values = tuple(attrs[n] for n in names) M[i, j] = values if undirected: M[j, i] = M[i, j] return M.view(np.recarray)
networkx/convert_matrix.py
385
networkx
{ "docstring": "Returns the graph adjacency matrix as a NumPy recarray.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy recarray.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n A valid NumPy named dtype used to initialize the NumPy recarray.\n The data type names are assumed to be keys in the graph edge attribute\n dictionary. The default is ``dtype([(\"weight\", float)])``.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n Returns\n -------\n M : NumPy recarray\n The graph with specified edge data as a Numpy recarray\n\n Notes\n -----\n When `nodelist` does not contain every node in `G`, the adjacency\n matrix is built from the subgraph of `G` that is induced by the nodes in\n `nodelist`.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_edge(1, 2, weight=7.0, cost=5)\n >>> A = nx.to_numpy_recarray(G, dtype=[(\"weight\", float), (\"cost\", int)])\n >>> print(A.weight)\n [[0. 7.]\n [7. 0.]]\n >>> print(A.cost)\n [[0 5]\n [5 0]]\n\n ", "language": "en", "n_whitespaces": 337, "n_words": 190, "vocab_size": 116 }
119
Python
75
78cd999e9b60d1b403cb4b736311cb0e00335eea
convert_matrix.py
176,299
28
246
to_numpy_recarray
https://github.com/networkx/networkx.git
Document default dtype in to_numpy_recarray docstring. (#5315)
323
0
41,816
18
2
13
def __new__(cls, name, manifold, **kwargs): if not isinstance(name, Str): name = Str(name) obj = super().__new__(cls, name, manifold) obj.manifold.patches.append(obj) # deprecated obj.coord_systems = _deprecated_list( , []) return obj
sympy/diffgeom/diffgeom.py
101
sympy
{ "docstring": "\n Patch.coord_systms is deprecated. The Patch class is now\n immutable. Instead use a separate list to keep track of coordinate\n systems.\n ", "language": "en", "n_whitespaces": 65, "n_words": 20, "vocab_size": 19 }
27
Python
23
f8674bfe4988332e7ce60ceb36b365ce9aff662a
diffgeom.py
197,095
12
64
__new__
https://github.com/sympy/sympy.git
Update the sympy.diffgeom mutability deprecations
83
0
48,335
10
1
12
def real_quick_ratio(self): la, lb = len(self.a), len(self.b) # can't have more matches than the number of elements in the # shorter sequence return _calculate_ratio(min(la, lb), la + lb) __class_getitem__ = classmethod(GenericAlias)
python3.10.4/Lib/difflib.py
72
XX-Net
{ "docstring": "Return an upper bound on ratio() very quickly.\n\n This isn't defined beyond that it is an upper bound on .ratio(), and\n is faster to compute than either .ratio() or .quick_ratio().\n ", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 25 }
31
Python
28
8198943edd73a363c266633e1aa5b2a9e9c9f526
difflib.py
222,485
3
37
real_quick_ratio
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
69
0
56,586
10
1
1
def test_model_exclude_copy_on_model_validation():
tests/test_main.py
12
pydantic
{ "docstring": "When `Config.copy_on_model_validation` is set, it should keep private attributes and excluded fields", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
2
Python
2
5490ad5173743ef2bf85216d11b9ff0822b3d25b
test_main.py
14,093
29
236
test_model_exclude_copy_on_model_validation
https://github.com/pydantic/pydantic.git
fix: `Config.copy_on_model_validation` does a deep copy and not a shallow one (#3642) * fix: `Config.copy_on_model_validation` does a deep copy and not a shallow one closes #3641 * fix: typo * use python 3.10 to run fastapi tests * fix fastapi test call Co-authored-by: Samuel Colvin <[email protected]>
5
0
2,817
6
3
12
def calculate_env(self): env = dict(os.environ) # Make sure we're using a local executor flavour if conf.get("core", "executor") not in [ executor_constants.LOCAL_EXECUTOR, executor_constants.SEQUENTIAL_EXECUTOR, ]: if "sqlite" in conf.get("database", "sql_alchemy_conn"): self.print_output("standalone", "Forcing executor to SequentialExecutor") env["AIRFLOW__CORE__EXECUTOR"] = executor_constants.SEQUENTIAL_EXECUTOR else: self.print_output("standalone", "Forcing executor to LocalExecutor") env["AIRFLOW__CORE__EXECUTOR"] = executor_constants.LOCAL_EXECUTOR return env
airflow/cli/commands/standalone_command.py
153
airflow
{ "docstring": "\n Works out the environment variables needed to run subprocesses.\n We override some settings as part of being standalone.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
47
Python
36
d8889da29ccfcbecd2c89b9e8e278c480767d678
standalone_command.py
47,323
13
84
calculate_env
https://github.com/apache/airflow.git
Move the database configuration to a new section (#22284) Co-authored-by: gitstart-airflow <[email protected]> Co-authored-by: GitStart <[email protected]> Co-authored-by: Egbosi Kelechi <[email protected]>
193
0
9,065
13
4
76
def interpret(*args): df = pd.DataFrame([args], columns=X_train.columns) df = df.astype({col: "category" for col in categorical_columns}) shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True)) scores_desc = list(zip(shap_values[0], X_train.columns)) scores_desc = sorted(scores_desc) fig_m = plt.figure(tight_layout=True) plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc]) plt.title("Feature Shap Values") plt.ylabel("Shap Value") plt.xlabel("Feature") plt.tight_layout() return fig_m unique_class = sorted(X_train["workclass"].unique()) unique_education = sorted(X_train["education"].unique()) unique_marital_status = sorted(X_train["marital.status"].unique()) unique_relationship = sorted(X_train["relationship"].unique()) unique_occupation = sorted(X_train["occupation"].unique()) unique_sex = sorted(X_train["sex"].unique()) unique_country = sorted(X_train["native.country"].unique()) with gr.Blocks() as demo: gr.Markdown() with gr.Row(): with gr.Column(): age = gr.Slider(label="Age", minimum=17, maximum=90, step=1, randomize=True) work_class = gr.Dropdown( label="Workclass", choices=unique_class, value=lambda: random.choice(unique_class), ) education = gr.Dropdown( label="Education Level", choices=unique_education, value=lambda: random.choice(unique_education), ) years = gr.Slider( label="Years of schooling", minimum=1, maximum=16, step=1, randomize=True, ) marital_status = gr.Dropdown( label="Marital Status", choices=unique_marital_status, value=lambda: random.choice(unique_marital_status), ) occupation = gr.Dropdown( label="Occupation", choices=unique_occupation, value=lambda: random.choice(unique_occupation), ) relationship = gr.Dropdown( label="Relationship Status", choices=unique_relationship, value=lambda: random.choice(unique_relationship), ) sex = gr.Dropdown( label="Sex", choices=unique_sex, value=lambda: random.choice(unique_sex) ) capital_gain = gr.Slider( label="Capital Gain", minimum=0, maximum=100000, step=500, randomize=True, ) capital_loss = gr.Slider( label="Capital Loss", minimum=0, maximum=10000, step=500, randomize=True ) hours_per_week = gr.Slider( label="Hours Per Week Worked", minimum=1, maximum=99, step=1 ) country = gr.Dropdown( label="Native Country", choices=unique_country, value=lambda: random.choice(unique_country), ) with gr.Column(): label = gr.Label() plot = gr.Plot() with gr.Row(): predict_btn = gr.Button(value="Predict") interpret_btn = gr.Button(value="Explain") predict_btn.click( predict, inputs=[ age, work_class, education, years, marital_status, occupation, relationship, sex, capital_gain, capital_loss, hours_per_week, country, ], outputs=[label], ) interpret_btn.click( interpret, inputs=[ age, work_class, education, years, marital_status, occupation, relationship, sex, capital_gain, capital_loss, hours_per_week, country, ], outputs=[plot], ) demo.launch()
demo/xgboost-income-prediction-with-explainability/run.py
1,103
gradio
{ "docstring": "\n **Income Classification with XGBoost 💰**: This demo uses an XGBoost classifier predicts income based on demographic factors, along with Shapley value-based *explanations*. The [source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability/blob/main/app.py).\n ", "language": "en", "n_whitespaces": 39, "n_words": 31, "vocab_size": 28 }
239
Python
149
597337dcb8762cca6e718b59a4ab6f5e333645fd
run.py
180,915
13
138
interpret
https://github.com/gradio-app/gradio.git
Adding a Playground Tab to the Website (#1860) * added playground with 12 demos * change name to recipes, restyle navbar * add explanatory text to page * fix demo mapping * categorize demos, clean up design * styling * cateogry naming and emojis * refactor and add text demos * add view code button * remove opening slash in embed * styling * add image demos * adding plot demos * remove see code button * removed submodules * changes * add audio models * remove fun section * remove tests in image semgentation demo repo * requested changes * add outbreak_forecast * fix broken demos * remove images and models, add new demos * remove readmes, change to run.py, add description as comment * move to /demos folder, clean up dict * add upload_to_spaces script * fix script, clean repos, and add to docker file * fix python versioning issue * env variable * fix * env fixes * spaces instead of tabs * revert to original networking.py * fix rate limiting in asr and autocomplete * change name to demos * clean up navbar * move url and description, remove code comments * add tabs to demos * remove margins and footer from embedded demo * font consistency Co-authored-by: Abubakar Abid <[email protected]>
1,686
0
43,252
16
1
4
def regex_lookup(self, lookup_type): raise NotImplementedError( "subclasses of BaseDatabaseOperations may require a regex_lookup() method" )
django/db/backends/base/operations.py
25
django
{ "docstring": "\n Return the string to use in a query when performing regular expression\n lookups (using \"regex\" or \"iregex\"). It should contain a '%s'\n placeholder for the column being searched against.\n\n If the feature is not supported (or part of it is not supported), raise\n NotImplementedError.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 39 }
14
Python
14
9c19aff7c7561e3a82978a272ecdaad40dda5c00
operations.py
204,878
4
13
regex_lookup
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
46
0
50,951
8
2
8
def execute(): frappe.reload_doc("accounts", "doctype", "gl_entry") for doctype in ["Sales Invoice", "Purchase Invoice", "Journal Entry"]: frappe.reload_doc("accounts", "doctype", frappe.scrub(doctype)) frappe.db.sql( .format( # nosec doctype=doctype ) )
erpnext/patches/v12_0/update_due_date_in_gle.py
101
erpnext
{ "docstring": " UPDATE `tabGL Entry`, `tab{doctype}`\n SET\n `tabGL Entry`.due_date = `tab{doctype}`.due_date\n WHERE\n `tabGL Entry`.voucher_no = `tab{doctype}`.name and `tabGL Entry`.party is not null\n and `tabGL Entry`.voucher_type in ('Sales Invoice', 'Purchase Invoice', 'Journal Entry')\n and `tabGL Entry`.account in (select name from `tabAccount` where account_type in ('Receivable', 'Payable'))", "language": "en", "n_whitespaces": 125, "n_words": 43, "vocab_size": 32 }
24
Python
20
494bd9ef78313436f0424b918f200dab8fc7c20b
update_due_date_in_gle.py
66,693
15
55
execute
https://github.com/frappe/erpnext.git
style: format code with black
16
0
14,296
12
1
2
async def async_tear_down(self) -> None:
homeassistant/components/mqtt/mixins.py
17
core
{ "docstring": "Handle the cleanup of platform specific parts, extend to the platform.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
5
Python
5
3b2aae5045f9f08dc8f174c5d975852588e1a132
mixins.py
296,364
2
8
async_tear_down
https://github.com/home-assistant/core.git
Refactor MQTT discovery (#67966) * Proof of concept * remove notify platform * remove loose test * Add rework from #67912 (#1) * Move notify serviceupdater to Mixins * Move tag discovery handler to Mixins * fix tests * Add typing for async_load_platform_helper * Add add entry unload support for notify platform * Simplify discovery updates * Remove not needed extra logic * Cleanup inrelevant or duplicate code * reuse update_device and move to mixins * Remove notify platform * revert changes to notify platform * Rename update class * unify tag entry setup * Use shared code for device_trigger `update_device` * PoC shared dispatcher for device_trigger * Fix bugs * Improve typing - remove async_update * Unload config_entry and tests * Release dispatcher after setup and deduplicate * closures to methods, revert `in` to `=`, updates * Re-add update support for tag platform * Re-add update support for device-trigger platform * Cleanup rediscovery code revert related changes * Undo discovery code shift * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Erik Montnemery <[email protected]> * Update homeassistant/components/mqtt/device_trigger.py Co-authored-by: Erik Montnemery <[email protected]> * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Erik Montnemery <[email protected]> * revert doc string changes * move conditions * typing and check config_entry_id * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Erik Montnemery <[email protected]> * cleanup not used attribute * Remove entry_unload code and tests * update comment * add second comment Co-authored-by: Erik Montnemery <[email protected]>
12
0
95,348
6
7
13
def unpolarify(eq, subs=None, exponents_only=False): if isinstance(eq, bool): return eq eq = sympify(eq) if subs is not None: return unpolarify(eq.subs(subs)) changed = True pause = False if exponents_only: pause = True while changed: changed = False res = _unpolarify(eq, exponents_only, pause) if res != eq: changed = True eq = res if isinstance(res, bool): return res # Finally, replacing Exp(0) by 1 is always correct. # So is polar_lift(0) -> 0. return res.subs({exp_polar(0): 1, polar_lift(0): 0})
sympy/functions/elementary/complexes.py
184
sympy
{ "docstring": "\n If `p` denotes the projection from the Riemann surface of the logarithm to\n the complex line, return a simplified version `eq'` of `eq` such that\n `p(eq') = p(eq)`.\n Also apply the substitution subs in the end. (This is a convenience, since\n ``unpolarify``, in a certain sense, undoes :func:`polarify`.)\n\n Examples\n ========\n\n >>> from sympy import unpolarify, polar_lift, sin, I\n >>> unpolarify(polar_lift(I + 2))\n 2 + I\n >>> unpolarify(sin(polar_lift(I + 7)))\n sin(7 + I)\n ", "language": "en", "n_whitespaces": 112, "n_words": 72, "vocab_size": 56 }
75
Python
46
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
complexes.py
195,857
19
116
unpolarify
https://github.com/sympy/sympy.git
Improved documentation formatting
190
0
47,444
11
1
4
def get_url_name(self, view_name): return self.name + ":" + view_name
wagtail/admin/viewsets/base.py
29
wagtail
{ "docstring": "\n Returns the namespaced URL name for the given view.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
9
Python
8
b4d3cf1c30b5fbe7eed09fab90c845f0cd0f678c
base.py
77,721
2
16
get_url_name
https://github.com/wagtail/wagtail.git
Docs for base ViewSet class
23
0
16,698
8
4
10
def text(self, data): data = data middle = data.lstrip(spaceCharacters) left = data[:len(data) - len(middle)] if left: yield {"type": "SpaceCharacters", "data": left} data = middle middle = data.rstrip(spaceCharacters) right = data[len(middle):] if middle: yield {"type": "Characters", "data": middle} if right: yield {"type": "SpaceCharacters", "data": right}
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treewalkers/base.py
169
transferlearning
{ "docstring": "Generates SpaceCharacters and Characters tokens\n\n Depending on what's in the data, this generates one or more\n ``SpaceCharacters`` and ``Characters`` tokens.\n\n For example:\n\n >>> from html5lib.treewalkers.base import TreeWalker\n >>> # Give it an empty tree just so it instantiates\n >>> walker = TreeWalker([])\n >>> list(walker.text(''))\n []\n >>> list(walker.text(' '))\n [{u'data': ' ', u'type': u'SpaceCharacters'}]\n >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE\n [{u'data': ' ', u'type': u'SpaceCharacters'},\n {u'data': u'abc', u'type': u'Characters'},\n {u'data': u' ', u'type': u'SpaceCharacters'}]\n\n :arg data: the text data\n\n :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens\n\n ", "language": "en", "n_whitespaces": 253, "n_words": 87, "vocab_size": 60 }
44
Python
26
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
base.py
62,629
13
94
text
https://github.com/jindongwang/transferlearning.git
upd; format
147
0
13,021
11
5
13
def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, "resolve_expression"): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError( "Oracle backend does not support timezone-aware datetimes when USE_TZ is False." ) return Oracle_datetime.from_datetime(value)
django/db/backends/oracle/operations.py
112
django
{ "docstring": "\n Transform a datetime value to an object compatible with what is expected\n by the backend driver for datetime columns.\n\n If naive datetime is passed assumes that is in UTC. Normally Django\n models.DateTimeField makes sure that if USE_TZ is True passed datetime\n is timezone aware.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 35 }
53
Python
42
9c19aff7c7561e3a82978a272ecdaad40dda5c00
operations.py
205,092
13
66
adapt_datetimefield_value
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
210
0
51,013
14
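A hedged sketch of the three branches in adapt_datetimefield_value above; it presupposes a configured Oracle connection (the ops object below is hypothetical) and uses Django's timezone utilities.

import datetime
from django.utils import timezone

naive = datetime.datetime(2022, 1, 1, 12, 0)
aware = timezone.make_aware(naive, datetime.timezone.utc)

# ops is a hypothetical DatabaseOperations instance taken from an Oracle connection.
ops.adapt_datetimefield_value(None)    # None passes straight through
ops.adapt_datetimefield_value(naive)   # naive value wrapped as Oracle_datetime unchanged
ops.adapt_datetimefield_value(aware)   # made naive in connection.timezone when USE_TZ is True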