Column           Type                  Min / shortest   Max / longest
complexity       int64                 1                56
n_identifiers    int64                 1                114
code             string (length)       19               12.7k
path             string (length)       8                134
n_ast_nodes      int64                 12               2.35k
ast_errors       string (length)       0                4.01k
repo             string (length)       3                28
documentation    dict                  -                -
n_words          int64                 2                866
language         string (1 class)      -                -
vocab_size       int64                 2                323
commit_id        string (length)       40               40
file_name        string (length)       5                79
id               int64                 243              338k
nloc             int64                 1                228
token_counts     int64                 5                1.4k
fun_name         string (length)       1                77
url              string (length)       31               60
commit_message   string (length)       3                15.3k
n_whitespaces    int64                 1                3.23k
n_ast_errors     int64                 0                20
d_id             int64                 74               121k
ast_levels       int64                 4                29
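The records below follow the column order of the schema above. As a minimal sketch of how such a dump could be loaded and inspected, assuming it corresponds to a Hugging Face-style dataset (the dataset name "example/python-functions" is hypothetical, not the actual dataset ID):

# Minimal sketch: load a dataset with the schema above and inspect one record.
# Assumption: the dump is published as a Hugging Face dataset; the ID below is hypothetical.
from datasets import load_dataset

ds = load_dataset("example/python-functions", split="train")

row = ds[0]
# Each record pairs the function source with its stripped docstring and per-function metrics.
print(row["fun_name"], row["repo"], row["path"])
print(row["documentation"]["docstring"][:80])
print("complexity:", row["complexity"], "nloc:", row["nloc"], "tokens:", row["token_counts"])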
1
37
def test_gpu_stats_monitor_no_queries(tmpdir):
    model = BoringModel()
    with pytest.deprecated_call(match="GPUStatsMonitor` callback was deprecated in v1.5"):
        gpu_stats = GPUStatsMonitor(
            memory_utilization=False,
            gpu_utilization=False,
            intra_step_time=True,
            inter_step_time=True,
        )
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=0,
        log_every_n_steps=1,
        accelerator="gpu",
        devices=1,
        callbacks=[gpu_stats],
    )
    with mock.patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics") as log_metrics_mock:
        trainer.fit(model)

    assert log_metrics_mock.mock_calls[1:] == [
        mock.call({"batch_time/intra_step (ms)": mock.ANY}, step=0),
        mock.call({"batch_time/inter_step (ms)": mock.ANY}, step=1),
        mock.call({"batch_time/intra_step (ms)": mock.ANY}, step=1),
    ]


@pytest.mark.skipif(torch.cuda.is_available(), reason="test requires CPU machine")
tests/callbacks/test_gpu_stats_monitor.py
283
@pytest.mark.skipif(torch.cuda.is_available(), reason="test requires CPU machine")
lightning
{ "docstring": "Test GPU logger doesn't fail if no \"nvidia-smi\" queries are to be performed.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
59
Python
49
4710a8128b52179be2b1fa46b17677eda7b849ea
test_gpu_stats_monitor.py
241,709
26
159
test_gpu_stats_monitor_no_queries
https://github.com/Lightning-AI/lightning.git
Update test_gpu_stats_monitor.py to use `devices` instead of `gpus` or `ipus` (#11340)
224
1
69,662
12
6
33
def _unpack_observation(self, obs_batch):
    unpacked = _unpack_obs(
        np.array(obs_batch, dtype=np.float32),
        self.observation_space.original_space,
        tensorlib=np,
    )

    if isinstance(unpacked[0], dict):
        assert "obs" in unpacked[0]
        unpacked_obs = [np.concatenate(tree.flatten(u["obs"]), 1) for u in unpacked]
    else:
        unpacked_obs = unpacked

    obs = np.concatenate(unpacked_obs, axis=1).reshape(
        [len(obs_batch), self.n_agents, self.obs_size]
    )

    if self.has_action_mask:
        action_mask = np.concatenate(
            [o["action_mask"] for o in unpacked], axis=1
        ).reshape([len(obs_batch), self.n_agents, self.n_actions])
    else:
        action_mask = np.ones(
            [len(obs_batch), self.n_agents, self.n_actions], dtype=np.float32
        )

    if self.has_env_global_state:
        state = np.concatenate(tree.flatten(unpacked[0][ENV_STATE]), 1)
    else:
        state = None

    return obs, action_mask, state
rllib/agents/qmix/qmix_policy.py
338
ray
{ "docstring": "Unpacks the observation, action mask, and state (if present)\n from agent grouping.\n\n Returns:\n obs (np.ndarray): obs tensor of shape [B, n_agents, obs_size]\n mask (np.ndarray): action mask, if any\n state (np.ndarray or None): state tensor of shape [B, state_size]\n or None if it is not in the batch\n ", "language": "en", "n_whitespaces": 116, "n_words": 47, "vocab_size": 34 }
75
Python
50
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
qmix_policy.py
133,808
27
223
_unpack_observation
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
332
0
30,117
15
3
15
def test_create_accessible(self):
    response, page = self._create_page(Page.objects.get(pk=2))
    self.assertIsNotNone(page.url)
    self.assertTrue(
        any(
            "View live" in message.message and page.url in message.message
            for message in response.context["messages"]
        )
    )
wagtail/admin/tests/pages/test_edit_page.py
105
wagtail
{ "docstring": "\n Create a page under the site root, check the flash message has a valid\n \"View live\" button.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
23
Python
19
d10f15e55806c6944827d801cd9c2d53f5da4186
test_edit_page.py
71,575
9
63
test_create_accessible
https://github.com/wagtail/wagtail.git
Reformat with black
110
0
15,690
12
2
12
def q_sample(self, x_start, t, noise=None):
    if noise is None:
        # noise = th.randn_like(x_start)
        noise = paddle.randn(x_start.shape, x_start.dtype)
    assert noise.shape == x_start.shape
    return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py
109
PaddleHub
{ "docstring": "\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n ", "language": "en", "n_whitespaces": 102, "n_words": 52, "vocab_size": 42 }
33
Python
26
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
gaussian_diffusion.py
49,786
6
73
q_sample
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_cnclip_vitb16 module
98
0
9,909
11
7
19
def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):
    filters, query_string = separate_filters_from_query(query_string)

    is_phrase = False
    tokens = []
    for part in query_string.split('"'):
        part = part.strip()

        if part:
            if is_phrase:
                tokens.append(Phrase(part))
            else:
                tokens.append(
                    PlainText(part, operator=operator or PlainText.DEFAULT_OPERATOR)
                )

        is_phrase = not is_phrase

    if tokens:
        if operator == "or":
            search_query = OR(tokens)
        else:
            search_query = AND(tokens)
    else:
        search_query = zero_terms

    return filters, search_query
wagtail/search/utils.py
193
wagtail
{ "docstring": "\n This takes a query string typed in by a user and extracts the following:\n\n - Quoted terms (for phrase search)\n - Filters\n\n For example, the following query:\n\n `hello \"this is a phrase\" live:true` would be parsed into:\n\n filters: {'live': 'true'}\n tokens: And([PlainText('hello'), Phrase('this is a phrase')])\n ", "language": "en", "n_whitespaces": 75, "n_words": 46, "vocab_size": 40 }
57
Python
38
d10f15e55806c6944827d801cd9c2d53f5da4186
utils.py
75,890
22
115
parse_query_string
https://github.com/wagtail/wagtail.git
Reformat with black
231
0
16,438
19
1
20
def test_memory_usage_completed_flows(tctx):
    gc.collect()
    flow_count = flows_tracked()

    server = Placeholder(Server)
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
        >> DataReceived(tctx.client, b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n")
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server, b"HTTP/1.1 204 No Content\r\n\r\n")
        << SendData(tctx.client, b"HTTP/1.1 204 No Content\r\n\r\n")
    )

    gc.collect()
    assert flows_tracked() == flow_count
test/mitmproxy/proxy/layers/http/test_http.py
179
mitmproxy
{ "docstring": "Make sure that flows are not kept in memory after they are completed.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
48
Python
32
035b3bf37d9785fef45e81eb9c0c47fc53ab24d2
test_http.py
251,123
15
105
test_memory_usage_completed_flows
https://github.com/mitmproxy/mitmproxy.git
drop HTTP streams that are completed, fix #4456
149
0
73,601
17
1
3
def test_bad_csrf_cookie_length(self):
    self._check_bad_or_missing_cookie(16 * "a", "CSRF cookie has incorrect length.")
tests/csrf_tests/tests.py
32
django
{ "docstring": "\n If the CSRF cookie has an incorrect length in a POST request, the\n middleware rejects the incoming request.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
10
Python
10
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
202,430
2
16
test_bad_csrf_cookie_length
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
24
0
50,129
9
1
19
def test_dynamic_path(self):
    doc = Document.objects.create(
        title="does not matter",
        created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),
        mime_type="application/pdf",
        pk=2,
        checksum="2",
        storage_path=StoragePath.objects.create(path="TestFolder/{created}"),
    )
    self.assertEqual(generate_filename(doc), "TestFolder/2020-06-25.pdf")
src/documents/tests/test_file_handling.py
127
paperless-ngx
{ "docstring": "\n GIVEN:\n - A document with a defined storage path\n WHEN:\n - the filename is generated for the document\n THEN:\n - the generated filename uses the defined storage path for the document\n ", "language": "en", "n_whitespaces": 93, "n_words": 31, "vocab_size": 17 }
22
Python
22
69ef26dab04d51e7e102dcb33cd98ddc6ad975fd
test_file_handling.py
319,622
10
81
test_dynamic_path
https://github.com/paperless-ngx/paperless-ngx.git
Feature: Dynamic document storage pathes (#916) * Added devcontainer * Add feature storage pathes * Exclude tests and add versioning * Check escaping * Check escaping * Check quoting * Echo * Escape * Escape : * Double escape \ * Escaping * Remove if * Escape colon * Missing \ * Esacpe : * Escape all * test * Remove sed * Fix exclude * Remove SED command * Add LD_LIBRARY_PATH * Adjusted to v1.7 * Updated test-cases * Remove devcontainer * Removed internal build-file * Run pre-commit * Corrected flak8 error * Adjusted to v1.7 * Updated test-cases * Corrected flak8 error * Adjusted to new plural translations * Small adjustments due to code-review backend * Adjusted line-break * Removed PAPERLESS prefix from settings variables * Corrected style change due to search+replace * First documentation draft * Revert changes to Pipfile * Add sphinx-autobuild with keep-outdated * Revert merge error that results in wrong storage path is evaluated * Adjust styles of generated files ... * Adds additional testing to cover dynamic storage path functionality * Remove unnecessary condition * Add hint to edit storage path dialog * Correct spelling of pathes to paths * Minor documentation tweaks * Minor typo * improving wrapping of filter editor buttons with new storage path button * Update .gitignore * Fix select border radius in non input-groups * Better storage path edit hint * Add note to edit storage path dialog re document_renamer * Add note to bulk edit storage path re document_renamer * Rename FILTER_STORAGE_DIRECTORY to PATH * Fix broken filter rule parsing * Show default storage if unspecified * Remove note re storage path on bulk edit * Add basic validation of filename variables Co-authored-by: Markus Kling <[email protected]> Co-authored-by: Trenton Holmes <[email protected]> Co-authored-by: Michael Shamoon <[email protected]> Co-authored-by: Quinn Casey <[email protected]>
116
0
116,979
13
4
19
def appell_poly(n, seq, v, f, K, x=None, polys=False):
    if n < 0:
        raise ValueError(
            "Cannot generate Appell sequence polynomial of order %s" % n)
    poly = DMP(dup_appell(int(n), seq, v, f, K), K)
    if x is not None:
        poly = Poly.new(poly, x)
    else:
        poly = PurePoly.new(poly, Dummy('x'))
    return poly if polys else poly.as_expr()


@public
sympy/polys/appellseqs.py
151
@public
sympy
{ "docstring": "Generates the nth polynomial in `x` of the Appell sequence with\n parameters `seq`, `v` and `f`.\n\n Parameters\n ==========\n\n n : int\n Order of the polynomial.\n seq : iterable\n v : Expr\n f : callable\n K : Domain\n Domain in which to perform computations and in which the coefficients\n of the specified sequence's polynomials lie in.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 133, "n_words": 72, "vocab_size": 53 }
53
Python
43
e875bdb804b0285e4a9bd8de0158436e792c03cb
appellseqs.py
199,617
10
97
appell_poly
https://github.com/sympy/sympy.git
Initial definition of Appell sequences
102
1
49,295
14
2
8
def config_for_enable_caching_device(rnn_cell):
    default_enable_caching_device = (
        tf.compat.v1.executing_eagerly_outside_functions()
    )
    if rnn_cell._enable_caching_device != default_enable_caching_device:
        return {"enable_caching_device": rnn_cell._enable_caching_device}
    return {}
keras/layers/rnn/rnn_utils.py
64
keras
{ "docstring": "Return the dict config for RNN cell wrt to enable_caching_device field.\n\n Since enable_caching_device is a internal implementation detail for speed up\n the RNN variable read when running on the multi remote worker setting, we\n don't want this config to be serialized constantly in the JSON. We will only\n serialize this field when a none default value is used to create the cell.\n Args:\n rnn_cell: the RNN cell for serialize.\n\n Returns:\n A dict which contains the JSON config for enable_caching_device value or\n empty dict if the enable_caching_device value is same as the default value.\n ", "language": "en", "n_whitespaces": 129, "n_words": 93, "vocab_size": 62 }
16
Python
15
84afc5193d38057e2e2badf9c889ea87d80d8fbf
rnn_utils.py
274,086
7
37
config_for_enable_caching_device
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
45
0
81,176
11
1
2
def size(self):
    return self["size"]
packages/python/plotly/plotly/graph_objs/bar/marker/_pattern.py
22
plotly.py
{ "docstring": "\n Sets the size of unit squares of the pattern fill in pixels,\n which corresponds to the interval of repetition of the pattern.\n\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n ", "language": "en", "n_whitespaces": 125, "n_words": 57, "vocab_size": 44 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_pattern.py
228,787
2
11
size
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,460
7
4
18
def _cmp_op(self, other, op_name):
    lhs_dtype_class = self._get_dtype_cmp_class(self._dtype)
    rhs_dtype_class = self._get_dtype_cmp_class(other._dtype)
    res_dtype = get_dtype(bool)
    # In HDK comparison with NULL always results in NULL,
    # but in pandas it is True for 'ne' comparison and False
    # for others.
    # Also pandas allows 'eq' and 'ne' comparison for values
    # of incompatible types which doesn't work in HDK.
    if lhs_dtype_class != rhs_dtype_class:
        if op_name == "eq" or op_name == "ne":
            return LiteralExpr(op_name == "ne")
        else:
            raise TypeError(
                f"Invalid comparison between {self._dtype} and {other._dtype}"
            )
    else:
        cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype)
        return build_if_then_else(
            self.is_null(), LiteralExpr(op_name == "ne"), cmp, res_dtype
        )
modin/experimental/core/execution/native/implementations/hdk_on_native/expr.py
192
modin
{ "docstring": "\n Build a comparison expression.\n\n Parameters\n ----------\n other : BaseExpr\n A value to compare with.\n op_name : str\n The comparison operation name.\n\n Returns\n -------\n BaseExpr\n The resulting comparison expression.\n ", "language": "en", "n_whitespaces": 125, "n_words": 28, "vocab_size": 22 }
99
Python
70
e5b1888cd932909e49194d58035da34b210b91c4
expr.py
154,585
16
106
_cmp_op
https://github.com/modin-project/modin.git
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
310
0
36,095
15
1
8
def mock_update_empty_fixture(mock_update):
    mock_update.return_value = None
    yield mock_update


@pytest.mark.parametrize(
    "data,options",
    [(MOCK_CONFIG, {})],
)
@pytest.mark.usefixtures("mock_update", "mock_config")
tests/components/google_travel_time/test_sensor.py
75
@pytest.mark.parametrize( "data,options", [(MOCK_CONFIG, {})], ) @pytest.mark.usefixtures("mock_update", "mock_config")
core
{ "docstring": "Mock an update to the sensor with an empty response.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
14
Python
14
beb30a1ff199596163c655e8ae745a0f1649b78a
test_sensor.py
292,225
3
13
mock_update_empty_fixture
https://github.com/home-assistant/core.git
Add google_travel_time sensor tests (#66568) Co-authored-by: Paulus Schoutsen <[email protected]>
26
1
91,325
9
1
3
async def count_real_users(self) -> int:
synapse/storage/databases/main/registration.py
17
synapse
{ "docstring": "Counts all users without a special user_type registered on the homeserver.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
5
Python
5
1783156dbcf4164692e66275d1c29857c434995b
registration.py
248,017
4
22
count_real_users
https://github.com/matrix-org/synapse.git
Add some type hints to datastore (#12423) * Add some type hints to datastore * newsfile * change `Collection` to `List` * refactor return type of `select_users_txn` * correct type hint in `stream.py` * Remove `Optional` in `select_users_txn` * remove not needed return type in `__init__` * Revert change in `get_stream_id_for_event_txn` * Remove import from `Literal`
12
0
72,048
6
15
34
def sign(e, x):
    if not isinstance(e, Basic):
        raise TypeError("e should be an instance of Basic")

    if e.is_positive:
        return 1
    elif e.is_negative:
        return -1
    elif e.is_zero:
        return 0
    elif not e.has(x):
        from sympy.simplify import logcombine
        e = logcombine(e)
        return _sign(e)
    elif e == x:
        return 1
    elif e.is_Mul:
        a, b = e.as_two_terms()
        sa = sign(a, x)
        if not sa:
            return 0
        return sa * sign(b, x)
    elif isinstance(e, exp):
        return 1
    elif e.is_Pow:
        if e.base == S.Exp1:
            return 1
        s = sign(e.base, x)
        if s == 1:
            return 1
        if e.exp.is_Integer:
            return s**e.exp
    elif isinstance(e, log):
        return sign(e.args[0] - 1, x)

    # if all else fails, do it the hard way
    c0, e0 = mrv_leadterm(e, x)
    return sign(c0, x)


@debug
@timeit
@cacheit
sympy/series/gruntz.py
339
@debug @timeit @cacheit
sympy
{ "docstring": "\n Returns a sign of an expression e(x) for x->oo.\n\n ::\n\n e > 0 for x sufficiently large ... 1\n e == 0 for x sufficiently large ... 0\n e < 0 for x sufficiently large ... -1\n\n The result of this function is currently undefined if e changes sign\n arbitrarily often for arbitrarily large x (e.g. sin(x)).\n\n Note that this returns zero only if e is *constantly* zero\n for x sufficiently large. [If e is constant, of course, this is just\n the same thing as the sign of e.]\n ", "language": "en", "n_whitespaces": 139, "n_words": 89, "vocab_size": 50 }
121
Python
73
f757f3daae6e11ea0cfb7dadc133274d8d74315f
gruntz.py
196,818
35
209
sign
https://github.com/sympy/sympy.git
Reordered imports 2
330
1
48,196
13
4
24
def statistics(self, refresh=False, approximate=False):
    # Prepare array with arguments for capi function
    smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
    stats_args = [
        self._ptr,
        c_int(approximate),
        byref(smin),
        byref(smax),
        byref(smean),
        byref(sstd),
        c_void_p(),
        c_void_p(),
    ]

    if refresh or self._stats_refresh:
        func = capi.compute_band_statistics
    else:
        # Add additional argument to force computation if there is no
        # existing PAM file to take the values from.
        force = True
        stats_args.insert(2, c_int(force))
        func = capi.get_band_statistics

    # Computation of statistics fails for empty bands.
    try:
        func(*stats_args)
        result = smin.value, smax.value, smean.value, sstd.value
    except GDALException:
        result = (None, None, None, None)

    self._stats_refresh = False

    return result
django/contrib/gis/gdal/raster/band.py
241
django
{ "docstring": "\n Compute statistics on the pixel values of this band.\n\n The return value is a tuple with the following structure:\n (minimum, maximum, mean, standard deviation).\n\n If approximate=True, the statistics may be computed based on overviews\n or a subset of image tiles.\n\n If refresh=True, the statistics will be computed from the data directly,\n and the cache will be updated where applicable.\n\n For empty bands (where all pixel values are nodata), all statistics\n values are returned as None.\n\n For raster formats using Persistent Auxiliary Metadata (PAM) services,\n the statistics might be cached in an auxiliary file.\n ", "language": "en", "n_whitespaces": 178, "n_words": 93, "vocab_size": 68 }
98
Python
77
9c19aff7c7561e3a82978a272ecdaad40dda5c00
band.py
204,002
25
156
statistics
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
369
0
50,606
12
6
23
def logical_or(self, other, context=None):
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)

    # fill to context.prec
    (opa, opb) = self._fill_logical(context, self._int, other._int)

    # make the operation, and clean starting zeroes
    result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
    return _dec_from_triple(0, result.lstrip('0') or '0', 0)
python3.10.4/Lib/_pydecimal.py
197
XX-Net
{ "docstring": "Applies an 'or' operation between self and other's digits.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
54
Python
45
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,615
9
122
logical_or
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
139
0
55,652
14
1
21
def test_localize_pk_shortcut(self):
    holder = Holder.objects.create(pk=123456789, dummy=42)
    inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly="")
    response = self.client.get(
        reverse("admin:admin_inlines_holder_change", args=(holder.id,))
    )
    inner_shortcut = "r/%s/%s/" % (
        ContentType.objects.get_for_model(inner).pk,
        inner.pk,
    )
    self.assertContains(response, inner_shortcut)
tests/admin_inlines/tests.py
153
django
{ "docstring": "\n The \"View on Site\" link is correct for locales that use thousand\n separators.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
28
Python
24
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,245
11
97
test_localize_pk_shortcut
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
117
0
51,910
13
1
5
def convert_field_to_list_or_connection(field, registry=None):
    model = field.related_model
netbox/netbox/graphql/__init__.py
26
netbox
{ "docstring": "\n From graphene_django.converter.py we need to monkey-patch this to return\n our ObjectListField with filtering support instead of DjangoListField\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 16 }
6
Python
6
99cf1b16718ca8bc037f546c41a9258bcc89b495
__init__.py
265,857
4
22
convert_field_to_list_or_connection
https://github.com/netbox-community/netbox.git
8245 add graphql filtering at all levels (#10618) * 8245 monkey-patch graphene-django to support filtering at all levels * 8245 fix tests * 8245 fix tests
12
0
78,216
7
3
23
def integration_reduction(facets, index, a, b, expr, dims, degree):
    expr = _sympify(expr)
    if expr.is_zero:
        return expr

    value = S.Zero
    x0 = facets[index].points[0]
    m = len(facets)
    gens = (x, y)

    inner_product = diff(expr, gens[0]) * x0[0] + diff(expr, gens[1]) * x0[1]

    if inner_product != 0:
        value += integration_reduction(facets, index, a, b,
                                       inner_product, dims, degree - 1)

    value += left_integral2D(m, index, facets, x0, expr, gens)

    return value/(len(dims) + degree - 1)
sympy/integrals/intpoly.py
208
sympy
{ "docstring": "Helper method for main_integrate. Returns the value of the input\n expression evaluated over the polytope facet referenced by a given index.\n\n Parameters\n ===========\n\n facets :\n List of facets of the polytope.\n index :\n Index referencing the facet to integrate the expression over.\n a :\n Hyperplane parameter denoting direction.\n b :\n Hyperplane parameter denoting distance.\n expr :\n The expression to integrate over the facet.\n dims :\n List of symbols denoting axes.\n degree :\n Degree of the homogeneous polynomial.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y\n >>> from sympy.integrals.intpoly import integration_reduction,\\\n hyperplane_parameters\n >>> from sympy import Point, Polygon\n >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1))\n >>> facets = triangle.sides\n >>> a, b = hyperplane_parameters(triangle)[0]\n >>> integration_reduction(facets, 0, a, b, 1, (x, y), 0)\n 5\n ", "language": "en", "n_whitespaces": 240, "n_words": 125, "vocab_size": 79 }
68
Python
43
498015021131af4dbb07eb110e5badaba8250c7b
intpoly.py
196,330
14
145
integration_reduction
https://github.com/sympy/sympy.git
Updated import locations
153
0
47,830
11
9
32
def forward_test(self, aug_batch_imgs, aug_batch_data_samples, **kwargs):
    num_augs = len(aug_batch_data_samples)
    batch_size = len(aug_batch_data_samples[0])

    aug_batch_img_metas = []
    for aug_index in range(num_augs):
        batch_img_metas = []
        for batch_index in range(batch_size):
            single_data_sample = aug_batch_data_samples[aug_index][
                batch_index]
            batch_img_metas.append(single_data_sample.meta)

        aug_batch_img_metas.append(batch_img_metas)

    for var, name in [(aug_batch_imgs, 'imgs'),
                      (aug_batch_img_metas, 'img_metas')]:
        if not isinstance(var, list):
            raise TypeError('{} must be a list, but got {}'.format(
                name, type(var)))

    num_augs = len(aug_batch_imgs)
    if num_augs != len(aug_batch_img_metas):
        raise ValueError(
            'num of augmentations ({}) != num of image meta ({})'.format(
                len(aug_batch_imgs), len(aug_batch_img_metas)))

    # NOTE the batched image size information may be useful, e.g.
    # in DETR, this is needed for the construction of masks, which is
    # then used for the transformer_head.
    for batch_img, batch_img_metas in zip(aug_batch_imgs, aug_batch_img_metas):
        batch_size = len(batch_img_metas)
        for img_id in range(batch_size):
            batch_img_metas[img_id]['batch_input_shape'] = \
                tuple(batch_img.size()[-2:])

    if num_augs == 1:
        return self.simple_test(aug_batch_imgs[0], aug_batch_img_metas[0],
                                **kwargs)
    else:
        assert 'proposals' not in kwargs, '`self.aug_test` do not ' \
            'support pre-difined proposals'
        aug_results = self.aug_test(aug_batch_imgs, aug_batch_img_metas,
                                    **kwargs)
        return aug_results
mmdet/models/detectors/base.py
394
mmdetection
{ "docstring": "\n Args:\n aug_batch_imgs (List[Tensor]): the outer list indicates test-time\n augmentations, the Tensor should have a shape NxCxHxW.\n We only support batch size = 1 when do the augtest.\n aug_batch_data_samples (List[List[:obj:`GeneralData`]]): the\n outer list indicates test-time augmentations and inner list\n indicates batch dimension. We only support batch size = 1 when\n do the augtest.\n\n Returns:\n list(obj:`InstanceData`): Detection results of the\n input images. Each item usually contains\\\n following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance,)\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances,).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "language": "en", "n_whitespaces": 327, "n_words": 103, "vocab_size": 68 }
151
Python
105
9c5b3331ac8edbfa328922fbab45c382380da540
base.py
244,367
36
247
forward_test
https://github.com/open-mmlab/mmdetection.git
Simplify api of one-stage detector
710
0
70,356
15
4
17
def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix):
    # meta device was added in pt=1.9
    require_version_core("torch>=1.9")

    # dematerialize param storage for keys that are going to be replaced by state_dict, by
    # putting those on the meta device
    for k in loaded_state_dict_keys:
        submodule, param_name = find_submodule_and_param_name(model, k, start_prefix)
        if submodule is not None:
            # selectively switch to the meta device only those params/buffers that will
            # be next replaced from state_dict. This a complex way to do p.to_("meta")
            # since we have no in-place to_ for tensors.
            new_val = getattr(submodule, param_name)
            if isinstance(new_val, torch.nn.Parameter):
                # isinstance returns False for Params on meta device, so switch after the check
                new_val = torch.nn.Parameter(new_val.to("meta"))
            else:
                new_val = new_val.to("meta")
            setattr(submodule, param_name, new_val)
src/transformers/modeling_utils.py
152
transformers
{ "docstring": "\n Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params.\n\n `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in\n `bert.pooler.dense.weight`\n\n ", "language": "en", "n_whitespaces": 45, "n_words": 32, "vocab_size": 29 }
114
Python
82
5da33f872913255d64717efe745a053975bbc28e
modeling_utils.py
37,154
11
90
_move_model_to_meta
https://github.com/huggingface/transformers.git
[modeling utils] revamp `from_pretrained(..., low_cpu_mem_usage=True)` + tests (#16657) * add low_cpu_mem_usage tests * wip: revamping * wip * install /usr/bin/time * wip * cleanup * cleanup * cleanup * cleanup * cleanup * fix assert * put the wrapper back * cleanup; switch to bert-base-cased * Trigger CI * Trigger CI
268
0
6,745
17
7
31
def _get_source_sum(source_hash, file_path, saltenv):
    ret = dict()
    schemes = ("salt", "http", "https", "ftp", "swift", "s3", "file")
    invalid_hash_msg = (
        "Source hash '{}' format is invalid. It must be in "
        "the format <hash type>=<hash>".format(source_hash)
    )
    source_hash = str(source_hash)
    source_hash_scheme = urllib.parse.urlparse(source_hash).scheme

    if source_hash_scheme in schemes:
        # The source_hash is a file on a server
        try:
            cached_hash_file = __salt__["cp.cache_file"](source_hash, saltenv)
        except MinionError as exc:
            log.exception("Failed to cache %s", source_hash, exc_info=exc)
            raise

        if not cached_hash_file:
            raise CommandExecutionError(
                "Source hash file {} not found".format(source_hash)
            )

        ret = __salt__["file.extract_hash"](cached_hash_file, "", file_path)
        if ret is None:
            raise SaltInvocationError(invalid_hash_msg)
    else:
        # The source_hash is a hash string
        items = source_hash.split("=", 1)
        if len(items) != 2:
            invalid_hash_msg = "{}, or it must be a supported protocol: {}".format(
                invalid_hash_msg, ", ".join(schemes)
            )
            raise SaltInvocationError(invalid_hash_msg)
        ret["hash_type"], ret["hsum"] = (item.strip().lower() for item in items)

    return ret
salt/modules/win_pkg.py
350
salt
{ "docstring": "\n Extract the hash sum, whether it is in a remote hash file, or just a string.\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 14 }
136
Python
93
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
win_pkg.py
215,965
31
201
_get_source_sum
https://github.com/saltstack/salt.git
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
379
0
54,287
16
4
24
def generate_rgbas_array(self, color, opacity):
    colors = list(tuplify(color))
    opacities = list(tuplify(opacity))
    rgbas = np.array(
        [color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))],
    )

    sheen_factor = self.get_sheen_factor()
    if sheen_factor != 0 and len(rgbas) == 1:
        light_rgbas = np.array(rgbas)
        light_rgbas[:, :3] += sheen_factor
        np.clip(light_rgbas, 0, 1, out=light_rgbas)
        rgbas = np.append(rgbas, light_rgbas, axis=0)
    return rgbas
manim/mobject/types/vectorized_mobject.py
193
manim
{ "docstring": "\n First arg can be either a color, or a tuple/list of colors.\n Likewise, opacity can either be a float, or a tuple of floats.\n If self.sheen_factor is not zero, and only\n one color was passed in, a second slightly light color\n will automatically be added for the gradient\n ", "language": "en", "n_whitespaces": 91, "n_words": 48, "vocab_size": 37 }
51
Python
42
d8dc0b462d973f0c1ddd62e557d2da89e45f6265
vectorized_mobject.py
189,410
13
125
generate_rgbas_array
https://github.com/ManimCommunity/manim.git
Cleanup `simple_functions.py` (#2437) * Remove fdiv * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * actually remove fdiv * Use lru cache and scipy's func * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * set maxsize should be enough for how it's used * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove get_num_args * Remove one instance of clip_in_place * Readd clip_in_place, it has a use * rm unnecessary line * Properly clip color * Revert "Properly clip color" This reverts commit 0591c7833457930b399f4125958f81d038c96e69. * remove clip in place * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * actually remove Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
162
0
46,046
15
1
2
def getExtraIncludeDirectories(self):
    # Virtual method, pylint: disable=no-self-use
    return None
nuitka/plugins/PluginBase.py
17
Nuitka
{ "docstring": "Decide which extra directories to use for C includes in compilation.\n\n Returns:\n List of directories or None by default\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 18 }
9
Python
9
5251e9561d7d1527fb99068e7b3e33592394cc16
PluginBase.py
178,812
2
8
getExtraIncludeDirectories
https://github.com/Nuitka/Nuitka.git
Plugins: Add interface for adding include directories for C
30
0
42,830
6
1
6
def list_datasets() -> List[str]:
    return sorted(_get_dataset_configs().keys())
ludwig/datasets/__init__.py
38
ludwig
{ "docstring": "Returns a list of the names of all available datasets.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
6
Python
6
e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a
__init__.py
8,063
3
21
list_datasets
https://github.com/ludwig-ai/ludwig.git
Config-first Datasets API (ludwig.datasets refactor) (#2479) * Adds README and stub for reading dataset configs. * Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py * Print config files in datasets folder. * First pass at automatic archive extraction. * Implemented downloading and extract. * Refactor DatasetConfig into its own file. * Fixed bugs downloading kaggle dataset. * Makes registry store dataset instances, not classes. Also comments out import_submodules for testing. * Typo fix. * Only pass data files on to load_unprocessed_dataframe, symlink directories. * Downloading dataset files into existing directory if exists. * Refactor: make datasets fully config-first, lazy load dataset loaders. * Implemented agnews custom loader. * Implements train/validation/test split by files, and globbing support * Adds _glob_multiple * Adds adult_census_income, agnews, allstate_claims_severity. * Implements sha256 verification, adds more datasets up to creditcard_fraud. * Adds checksums, dbpedia, electricity * Fixes gzip file name returned as string not list, adds up to forest_cover dataset. * Adds datasets up to reuters_r8 * Adds all datasets which don't require a custom class. * Restore dataset import behavior by implementing module __getattr__ * Adds KDD datasets. * Adds ieee_fraud. * Adds imbalanced_insurance, insurance_lite. * Adds mnist. * Completes implementation of all of the built-in datasets. * Made cache_dir optional, read from environment variable if set. * Upgrades datasets tests. * Adds test for new dataset config API. Also adds scripts for dataset link checking. * Fixes loading allstate claims severity dataset. * Use @lru_cache(1), @cache not supported in python < 3.9 * Deletes dataset registry, updates automl test utils * Fix imports of datasets API. * Adds more detail to sha256: docstring and basic README * Copy-paste link oops. * Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README * Adds link for twitter bots. * Fix order of splits in README.md * typo * Adds verify as a phase in doc string. * Support .pqt, .pq extensions for parquet. * Handle nested archives with longer file extensions like .csv.zip * Handle nested .gz types properly too. Check all extensions with .endswith * Handle all archive types with .endswith * Update ludwig/datasets/loaders/split_loaders.py Co-authored-by: Joppe Geluykens <[email protected]> * Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir) * Resolve preserved paths relative to raw dataset dir before move. * Catch runtime exception from extracting sub-archives. Co-authored-by: Daniel Treiman <[email protected]> Co-authored-by: Joppe Geluykens <[email protected]>
12
0
1,318
11
1
11
def test_cable_cannot_terminate_to_an_existing_connection(self):
    # Try to create a cable with the same interface terminations
    cable = Cable(a_terminations=[self.interface2], b_terminations=[self.interface1])
    with self.assertRaises(ValidationError):
        cable.clean()
netbox/dcim/tests/test_models.py
68
netbox
{ "docstring": "\n Either side of a cable cannot be terminated when that side already has a connection\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 13 }
20
Python
18
3a461d02793e6f9d41c2b1a92647e691de1abaac
test_models.py
264,887
4
39
test_cable_cannot_terminate_to_an_existing_connection
https://github.com/netbox-community/netbox.git
Update Cable instantiations to match new signature
59
0
77,898
11
5
15
def _get_pyqt_webengine_qt_version() -> Optional[str]:
    try:
        import importlib.metadata as importlib_metadata  # type: ignore[import]
    except ImportError:
        try:
            import importlib_metadata  # type: ignore[no-redef]
        except ImportError:
            log.misc.debug("Neither importlib.metadata nor backport available")
            return None

    for suffix in ['Qt5', 'Qt']:
        try:
            return importlib_metadata.version(f'PyQtWebEngine-{suffix}')
        except importlib_metadata.PackageNotFoundError:
            log.misc.debug(f"PyQtWebEngine-{suffix} not found")

    return None


@dataclasses.dataclass
qutebrowser/utils/version.py
144
@dataclasses.dataclass
qutebrowser
{ "docstring": "Get the version of the PyQtWebEngine-Qt package.\n\n With PyQtWebEngine 5.15.3, the QtWebEngine binary got split into its own\n PyQtWebEngine-Qt PyPI package:\n\n https://www.riverbankcomputing.com/pipermail/pyqt/2021-February/043591.html\n https://www.riverbankcomputing.com/pipermail/pyqt/2021-February/043638.html\n\n PyQtWebEngine 5.15.4 renamed it to PyQtWebEngine-Qt5...:\n https://www.riverbankcomputing.com/pipermail/pyqt/2021-March/043699.html\n\n Here, we try to use importlib.metadata or its backport (optional dependency) to\n figure out that version number. If PyQtWebEngine is installed via pip, this will\n give us an accurate answer.\n ", "language": "en", "n_whitespaces": 90, "n_words": 60, "vocab_size": 51 }
45
Python
32
4094e15bcbe71311685cb8c57abb6bfb4deadbdc
version.py
320,660
30
73
_get_pyqt_webengine_qt_version
https://github.com/qutebrowser/qutebrowser.git
version: Always prefer builtin importlib.metadata If we have a builtin importlib.metadata (Python 3.8+) and the importlib_metadata backport installed, we preferred the backport. However, the version.py tests do the opposite: They only mock the builtin if it is available. This did lead to failing tests if the backport was installed in an environment where the builtin was available too. Since we don't need any specialized functionality (only reading the version), we can prefer the builtin no matter whether a backport is available or not.
151
1
117,258
14
3
32
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
    r
    n_samples = X.shape[0]
    if n_neighbors >= n_samples / 2:
        raise ValueError(
            f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
            f" ({n_samples / 2})"
        )
    dist_X = pairwise_distances(X, metric=metric)
    if metric == "precomputed":
        dist_X = dist_X.copy()
    # we set the diagonal to np.inf to exclude the points themselves from
    # their own neighborhood
    np.fill_diagonal(dist_X, np.inf)
    ind_X = np.argsort(dist_X, axis=1)
    # `ind_X[i]` is the index of sorted distances between i and other samples
    ind_X_embedded = (
        NearestNeighbors(n_neighbors=n_neighbors)
        .fit(X_embedded)
        .kneighbors(return_distance=False)
    )

    # We build an inverted index of neighbors in the input space: For sample i,
    # we define `inverted_index[i]` as the inverted index of sorted distances:
    # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
    inverted_index = np.zeros((n_samples, n_samples), dtype=int)
    ordered_indices = np.arange(n_samples + 1)
    inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
    ranks = (
        inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
    )
    t = np.sum(ranks[ranks > 0])
    t = 1.0 - t * (
        2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    )
    return t
sklearn/manifold/_t_sne.py
352
scikit-learn
{ "docstring": "Expresses to what extent the local structure is retained.\n\n The trustworthiness is within [0, 1]. It is defined as\n\n .. math::\n\n T(k) = 1 - \\frac{2}{nk (2n - 3k - 1)} \\sum^n_{i=1}\n \\sum_{j \\in \\mathcal{N}_{i}^{k}} \\max(0, (r(i, j) - k))\n\n where for each sample i, :math:`\\mathcal{N}_{i}^{k}` are its k nearest\n neighbors in the output space, and every sample j is its :math:`r(i, j)`-th\n nearest neighbor in the input space. In other words, any unexpected nearest\n neighbors in the output space are penalised in proportion to their rank in\n the input space.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)\n If the metric is 'precomputed' X must be a square distance\n matrix. Otherwise it contains a sample per row.\n\n X_embedded : ndarray of shape (n_samples, n_components)\n Embedding of the training data in low-dimensional space.\n\n n_neighbors : int, default=5\n The number of neighbors that will be considered. Should be fewer than\n `n_samples / 2` to ensure the trustworthiness to lies within [0, 1], as\n mentioned in [1]_. An error will be raised otherwise.\n\n metric : str or callable, default='euclidean'\n Which metric to use for computing pairwise distances between samples\n from the original input space. If metric is 'precomputed', X must be a\n matrix of pairwise distances or squared distances. Otherwise, for a list\n of available metrics, see the documentation of argument metric in\n `sklearn.pairwise.pairwise_distances` and metrics listed in\n `sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the\n \"cosine\" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n trustworthiness : float\n Trustworthiness of the low-dimensional embedding.\n\n References\n ----------\n .. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood\n Preservation in Nonlinear Projection Methods: An Experimental Study.\n In Proceedings of the International Conference on Artificial Neural Networks\n (ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.\n\n .. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving\n Local Structure. Proceedings of the Twelth International Conference on\n Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.\n ", "language": "en", "n_whitespaces": 550, "n_words": 314, "vocab_size": 202 }
173
Python
115
ade90145c9c660a1a7baf2315185995899b0f356
_t_sne.py
259,640
84
228
trustworthiness
https://github.com/scikit-learn/scikit-learn.git
FIX Raise error when n_neighbors >= n_samples / 2 in manifold.trustworthiness (#23033) Co-authored-by: Shao Yang Hong <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
322
0
75,842
16
1
5
def writelines(self, seq):
    return _compression.BaseStream.writelines(self, seq)
python3.10.4/Lib/bz2.py
31
XX-Net
{ "docstring": "Write a sequence of byte strings to the file.\n\n Returns the number of uncompressed bytes written.\n seq can be any iterable yielding byte strings.\n\n Line separators are not added between the written byte strings.\n ", "language": "en", "n_whitespaces": 62, "n_words": 34, "vocab_size": 28 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
bz2.py
221,197
2
19
writelines
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
20
0
56,267
8
1
3
def test_generic_errors(self, constructor):
pandas/tests/indexes/interval/test_constructors.py
15
pandas
{ "docstring": "\n override the base class implementation since errors are handled\n differently; checks unnecessary since caught at the Interval level\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
3
Python
3
76923d7b58d8f25329e779a40b87e2b6959f9cea
test_constructors.py
170,653
2
9
test_generic_errors
https://github.com/pandas-dev/pandas.git
issue 48855 enable pylint unnecessary-pass (#49418) issue 48855 enable unnecessary-pass
10
0
40,592
6
1
6
def get_actual_gle_dict(name):
    return dict(
        frappe.db.sql(
            ,
            name,
        )
    )
erpnext/assets/doctype/asset_capitalization/test_asset_capitalization.py
33
erpnext
{ "docstring": "\n\t\tselect account, sum(debit-credit) as diff\n\t\tfrom `tabGL Entry`\n\t\twhere voucher_type = 'Asset Capitalization' and voucher_no = %s\n\t\tgroup by account\n\t\thaving diff != 0\n\t", "language": "en", "n_whitespaces": 19, "n_words": 24, "vocab_size": 22 }
9
Python
8
58d430fe3ee62e93ad8d16a08bb42156a25b7d41
test_asset_capitalization.py
69,222
13
20
get_actual_gle_dict
https://github.com/frappe/erpnext.git
feat: Asset Capitalization - manual selection of entry type - GLE cleanup with smaller functions - GLE considering periodical inventory - test cases
2
0
14,997
10
1
1
def doctor_output_no_config():
    return
tests/conftest.py
13
thumbor
{ "docstring": "\nThumbor doctor will analyze your install and verify if everything is working as expected.\n\nVerifying libraries support...\n\n✅ pycurl is installed correctly.\n✅ cairosvg is installed correctly.\n\nVerifying thumbor compiled extensions...\n\n✅ _alpha\n✅ _bounding_box\n✅ _brightness\n✅ _colorize\n✅ _composite\n✅ _contrast\n✅ _convolution\n✅ _curve\n✅ _equalize\n✅ _fill\n✅ _nine_patch\n✅ _noise\n✅ _rgb\n✅ _round_corner\n✅ _saturation\n✅ _sharpen\n\nVerifying extension programs...\n\n✅ jpegtran is installed correctly.\n✅ ffmpeg is installed correctly.\n✅ gifsicle is installed correctly.\nVerifying security...\n\n\n🎉 Congratulations! No errors found! 🎉\n", "language": "en", "n_whitespaces": 62, "n_words": 89, "vocab_size": 52 }
3
Python
3
0e845259cd3d49b39889ae15df19922af0ef7269
conftest.py
191,181
38
6
doctor_output_no_config
https://github.com/thumbor/thumbor.git
Remove snapshottest to reduce number of dependencies (#1433) Having an extra package that can be replaced with something already included makes packaging easier. For instance, in Debian, one would have to either be fortunate to find an existing package or go over the trouble of creating such package and all its dependencies. I believe this CL is a good small compromise considering the benefit it brings.
6
0
46,464
6
5
18
def draw(G, pos=None, ax=None, **kwds):
    import matplotlib.pyplot as plt

    if ax is None:
        cf = plt.gcf()
    else:
        cf = ax.get_figure()
    cf.set_facecolor("w")
    if ax is None:
        if cf.axes:
            ax = cf.gca()
        else:
            ax = cf.add_axes((0, 0, 1, 1))

    if "with_labels" not in kwds:
        kwds["with_labels"] = "labels" in kwds

    draw_networkx(G, pos=pos, ax=ax, **kwds)
    ax.set_axis_off()
    plt.draw_if_interactive()
    return
networkx/drawing/nx_pylab.py
206
networkx
{ "docstring": "Draw the graph G with Matplotlib.\n\n Draw the graph as a simple representation with no node\n labels or edge labels and using the full Matplotlib figure area\n and no axis labels by default. See draw_networkx() for more\n full-featured drawing that allows title, axis labels etc.\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n pos : dictionary, optional\n A dictionary with nodes as keys and positions as values.\n If not specified a spring layout positioning will be computed.\n See :py:mod:`networkx.drawing.layout` for functions that\n compute node positions.\n\n ax : Matplotlib Axes object, optional\n Draw the graph in specified Matplotlib axes.\n\n kwds : optional keywords\n See networkx.draw_networkx() for a description of optional keywords.\n\n Examples\n --------\n >>> G = nx.dodecahedral_graph()\n >>> nx.draw(G)\n >>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout\n\n See Also\n --------\n draw_networkx\n draw_networkx_nodes\n draw_networkx_edges\n draw_networkx_labels\n draw_networkx_edge_labels\n\n Notes\n -----\n This function has the same name as pylab.draw and pyplot.draw\n so beware when using `from networkx import *`\n\n since you might overwrite the pylab.draw function.\n\n With pyplot use\n\n >>> import matplotlib.pyplot as plt\n >>> G = nx.dodecahedral_graph()\n >>> nx.draw(G) # networkx draw()\n >>> plt.draw() # pyplot draw()\n\n Also see the NetworkX drawing examples at\n https://networkx.org/documentation/latest/auto_examples/index.html\n ", "language": "en", "n_whitespaces": 348, "n_words": 190, "vocab_size": 118 }
54
Python
39
7f3ec2c5906b709733a5c26285032bf24134bcf0
nx_pylab.py
177,150
18
125
draw
https://github.com/networkx/networkx.git
See matplotlb 3.6rc1 failure (#5937) * See matplotlb 3.6rc1 failure * replace use of private class method to allow mpl v3.6 to work. * ensure ax exists before calling colorbar * Undo matplotlib pin Co-authored-by: Dan Schult <[email protected]>
144
0
42,290
14
1
11
def adjacency_matrix(G, nodelist=None, dtype=None, weight="weight"):
    import warnings

    warnings.warn(
        "adjacency_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.",
        FutureWarning,
        stacklevel=2,
    )
    # TODO: Change to `to_scipy_sparse_array` for networkx 3.0
    return nx.to_scipy_sparse_matrix(G, nodelist=nodelist, dtype=dtype, weight=weight)
networkx/linalg/graphmatrix.py
81
networkx
{ "docstring": "Returns adjacency matrix of G.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n The desired data-type for the array.\n If None, then the NumPy default is used.\n\n weight : string or None, optional (default='weight')\n The edge data key used to provide each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n A : SciPy sparse matrix\n Adjacency matrix representation of G.\n\n Notes\n -----\n For directed graphs, entry i,j corresponds to an edge from i to j.\n\n If you want a pure Python adjacency matrix representation try\n networkx.convert.to_dict_of_dicts which will return a\n dictionary-of-dictionaries format that can be addressed as a\n sparse matrix.\n\n For MultiGraph/MultiDiGraph with parallel edges the weights are summed.\n See `to_numpy_array` for other options.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the edge weight attribute\n (or the number 1 if the edge has no weight attribute). If the\n alternate convention of doubling the edge weight is desired the\n resulting Scipy sparse matrix can be modified as follows:\n\n >>> G = nx.Graph([(1, 1)])\n >>> A = nx.adjacency_matrix(G)\n >>> print(A.todense())\n [[1]]\n >>> A.setdiag(A.diagonal() * 2)\n >>> print(A.todense())\n [[2]]\n\n See Also\n --------\n to_numpy_array\n to_scipy_sparse_array\n to_dict_of_dicts\n adjacency_spectrum\n ", "language": "en", "n_whitespaces": 392, "n_words": 231, "vocab_size": 137 }
37
Python
35
5dfd57af2a141a013ae3753e160180b82bec9469
graphmatrix.py
176,190
8
52
adjacency_matrix
https://github.com/networkx/networkx.git
Use scipy.sparse array datastructure (#5139) * Step 1: use sparse arrays in nx.to_scipy_sparse_matrix. Seems like a reasonable place to start. nx.to_scipy_sparse_matrix is one of the primary interfaces to scipy.sparse from within NetworkX. * 1: Use np.outer instead of mult col/row vectors Fix two instances in modularitymatrix where a new 2D array was being created via an outer product of two \"vectors\". In the matrix case, this was a row vector \* a column vector. In the array case this can be disambiguated by being explicit with np.outer. * Update _transition_matrix in laplacianmatrix module - A few instances of matrix multiplication operator - Add np.newaxis + transpose to get shape right for broadcasting - Explicitly convert e.g. sp.sparse.spdiags to a csr_array. * Update directed_combinitorial_laplacian w/ sparse array. - Wrap spdiags in csr_array and update matmul operators. * Rm matrix-specific code from lgc and hmn modules - Replace .A call with appropriate array semantics - wrap sparse.diags in csr_array. * Change hits to use sparse array semantics. - Replace * with @ - Remove superfluous calls to flatten. * Update sparse matrix usage in layout module. - Simplify lil.getrowview call - Wrap spdiags in csr_array. * lil_matrix -> lil_array in graphmatrix.py. * WIP: Start working on algebraic connectivity module. * Incorporate auth mat varname feedback. * Revert 1D slice and comment for 1D sparse future. * Add TODOs: rm csr_array wrapper around spdiags etc. * WIP: cleanup algebraicconn: tracemin_fiedler. * Typo. * Finish reviewing algebraicconnectivity. * Convert bethe_hessian matrix to use sparse arrays. * WIP: update laplacian. Update undirected laplacian functions. * WIP: laplacian - add comment about _transition_matrix return types. * Finish laplacianmatrix review. * Update attrmatrix. * Switch to official laplacian function. * Update pagerank to use sparse array. * Switch bipartite matrix to sparse arrays. * Check from_scipy_sparse_matrix works with arrays. Modifies test suite. * Apply changes from review. * Fix failing docstring tests. * Fix missing axis for in-place multiplication. * Use scipy==1.8rc2 * Use matrix multiplication * Fix PyPy CI * [MRG] Create plot_subgraphs.py example (#5165) * Create plot_subgraphs.py https://github.com/networkx/networkx/issues/4220 * Update plot_subgraphs.py black * Update plot_subgraphs.py lint plus font_size * Update plot_subgraphs.py added more plots * Update plot_subgraphs.py removed plots from the unit test and added comments * Update plot_subgraphs.py lint * Update plot_subgraphs.py typos fixed * Update plot_subgraphs.py added nodes to the plot of the edges removed that was commented out for whatever reason * Update plot_subgraphs.py revert the latest commit - the line was commented out for a reason - it's broken * Update plot_subgraphs.py fixed node color issue * Update plot_subgraphs.py format fix * Update plot_subgraphs.py forgot to draw the nodes... now fixed * Fix sphinx warnings about heading length. * Update examples/algorithms/plot_subgraphs.py * Update examples/algorithms/plot_subgraphs.py Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]> * Add traveling salesman problem to example gallery (#4874) Adds an example of the using Christofides to solve the TSP problem to the example galery. 
Co-authored-by: Ross Barnowski <[email protected]> * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037) * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() * Resolved Requested Changes * Revert changes to degree docstrings. * Update comments in example. * Apply wording to edges method in all graph classes. Co-authored-by: Ross Barnowski <[email protected]> * Compatibility updates from testing with numpy/scipy/pytest rc's (#5226) * Rm deprecated scipy subpkg access. * Use recwarn fixture in place of deprecated pytest pattern. * Rm unnecessary try/except from tests. * Replace internal `close` fn with `math.isclose`. (#5224) * Replace internal close fn with math.isclose. * Fix lines in docstring examples. * Fix Python 3.10 deprecation warning w/ int div. (#5231) * Touchups and suggestions for subgraph gallery example (#5225) * Simplify construction of G with edges rm'd * Rm unused graph attribute. * Shorten categorization by node type. * Simplify node coloring. * Simplify isomorphism check. * Rm unit test. * Rm redundant plotting of each subgraph. * Use new package name (#5234) * Allowing None edges in weight function of bidirectional Dijkstra (#5232) * added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None. * changed syntax for better readability and code duplicate avoidance Co-authored-by: Hohmann, Nikolas <[email protected]> * Add an FAQ about assigning issues. (#5182) * Add FAQ about assigning issues. * Add note about linking issues from new PRs. * Update dev deps (#5243) * Update minor doc issues with tex notation (#5244) * Add FutureWarnings to fns that return sparse matrices - biadjacency_matrix. - bethe_hessian_matrix. - incidence_matrix. - laplacian functions. - modularity_matrix functions. - adjacency_matrix. * Add to_scipy_sparse_array and use it everywhere. Add a new conversion function to preserve array semantics internally while not altering behavior for users. Also adds FutureWarning to to_scipy_sparse_matrix. * Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix. * Handle deprecations in separate PR. * Fix docstring examples. Co-authored-by: Mridul Seth <[email protected]> Co-authored-by: Jarrod Millman <[email protected]> Co-authored-by: Andrew Knyazev <[email protected]> Co-authored-by: Dan Schult <[email protected]> Co-authored-by: eskountis <[email protected]> Co-authored-by: Anutosh Bhat <[email protected]> Co-authored-by: NikHoh <[email protected]> Co-authored-by: Hohmann, Nikolas <[email protected]> Co-authored-by: Sultan Orazbayev <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
76
0
41,756
8
1
18
def test_cancellation_while_holding_read_lock(self):
    rwlock = ReadWriteLock()
    key = "key"

    # 1. A reader takes the lock and blocks.
    reader_d, _, _ = self._start_blocking_reader(rwlock, key, "read completed")

    # 2. A writer waits for the reader to complete.
    writer_d, _ = self._start_nonblocking_writer(rwlock, key, "write completed")
    self.assertFalse(writer_d.called)

    # 3. The reader is cancelled.
    reader_d.cancel()
    self.failureResultOf(reader_d, CancelledError)

    # 4. The writer should take the lock and complete.
    self.assertTrue(
        writer_d.called, "Writer is stuck waiting for a cancelled reader"
    )
    self.assertEqual("write completed", self.successResultOf(writer_d))
tests/util/test_rwlock.py
152
synapse
{ "docstring": "Test cancellation while holding a read lock.\n\n A waiting writer should be given the lock when the reader holding the lock is\n cancelled.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 19 }
76
Python
55
605d161d7d585847fd1bb98d14d5281daeac8e86
test_rwlock.py
247,584
12
88
test_cancellation_while_holding_read_lock
https://github.com/matrix-org/synapse.git
Add cancellation support to `ReadWriteLock` (#12120) Also convert `ReadWriteLock` to use async context managers. Signed-off-by: Sean Quah <[email protected]>
192
0
71,759
9
1
5
def is_re(obj) -> bool: return isinstance(obj, Pattern)
pandas/core/dtypes/inference.py
26
pandas
{ "docstring": "\n Check if the object is a regex pattern instance.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` is a regex pattern.\n\n Examples\n --------\n >>> is_re(re.compile(\".*\"))\n True\n >>> is_re(\"foo\")\n False\n ", "language": "en", "n_whitespaces": 84, "n_words": 34, "vocab_size": 29 }
7
Python
7
bce995817caf00ab5e82cb4cf1b540f1530cf4ea
inference.py
172,101
21
15
is_re
https://github.com/pandas-dev/pandas.git
Fix some dosctring RT02 error (#50197)
13
0
40,755
7
12
25
def partial_fit(self, X, y, classes=None, sample_weight=None):
    first_time = not hasattr(self, "estimators_")

    if first_time:
        self._validate_params()

    y = self._validate_data(X="no_validation", y=y, multi_output=True)

    if y.ndim == 1:
        raise ValueError(
            "y must have at least two dimensions for "
            "multi-output regression but has only one."
        )

    if sample_weight is not None and not has_fit_parameter(
        self.estimator, "sample_weight"
    ):
        raise ValueError("Underlying estimator does not support sample weights.")

    self.estimators_ = Parallel(n_jobs=self.n_jobs)(
        delayed(_partial_fit_estimator)(
            self.estimators_[i] if not first_time else self.estimator,
            X,
            y[:, i],
            classes[i] if classes is not None else None,
            sample_weight,
            first_time,
        )
        for i in range(y.shape[1])
    )

    if first_time and hasattr(self.estimators_[0], "n_features_in_"):
        self.n_features_in_ = self.estimators_[0].n_features_in_
    if first_time and hasattr(self.estimators_[0], "feature_names_in_"):
        self.feature_names_in_ = self.estimators_[0].feature_names_in_

    return self
sklearn/multioutput.py
332
scikit-learn
{ "docstring": "Incrementally fit a separate model for each class output.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n y : {array-like, sparse matrix} of shape (n_samples, n_outputs)\n Multi-output targets.\n\n classes : list of ndarray of shape (n_outputs,), default=None\n Each array is unique classes for one output in str/int.\n Can be obtained via\n ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where `y`\n is the target matrix of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that `y` doesn't need to contain all labels in `classes`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If `None`, then samples are equally weighted.\n Only supported if the underlying regressor supports sample\n weights.\n\n Returns\n -------\n self : object\n Returns a fitted instance.\n ", "language": "en", "n_whitespaces": 349, "n_words": 136, "vocab_size": 100 }
107
Python
77
d942600e1f1979c431c24f59933a95155789f324
multioutput.py
260,558
30
214
partial_fit
https://github.com/scikit-learn/scikit-learn.git
MAINT add parameter_constraints for MultiOutputClassifier and MultiOutputRegressor (#23902) Co-authored-by: jeremiedbb <[email protected]>
421
0
76,339
13
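The docstring above describes the per-output incremental fitting API; a short usage sketch may make it concrete. The estimator choice, data shapes, and random data below are illustrative assumptions, not taken from the record.

# Hypothetical usage of MultiOutputRegressor.partial_fit (illustrative only).
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.multioutput import MultiOutputRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(32, 4))          # 32 samples, 4 features
y = rng.normal(size=(32, 2))          # two regression targets per sample

model = MultiOutputRegressor(SGDRegressor())
# partial_fit trains one underlying estimator per output column,
# creating them on the first call and reusing them afterwards.
model.partial_fit(X, y)
model.partial_fit(X, y)               # incremental update with another batch
print(model.predict(X[:3]).shape)     # -> (3, 2)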
1
8
def print_help(self):
    help_text = f
    console.print(text=help_text, menu="Stocks - Due Diligence")
gamestonk_terminal/stocks/due_diligence/dd_controller.py
47
OpenBBTerminal
{ "docstring": "Print help\n[param]Ticker: [/param]{self.ticker}[cmds]\n\n[src][Finviz][/src]\n analyst analyst prices and ratings of the company\n[src][FMP][/src]\n rating rating over time (daily)\n[src][Finnhub][/src]\n rot number of analysts ratings over time (monthly)\n[src][Business Insider][/src]\n pt price targets over time\n est quarter and year analysts earnings estimates\n[src][Market Watch][/src]\n sec SEC filings\n[src][Csimarket][/src]\n supplier list of suppliers\n customer list of customers\n[src][Cathiesark.com][/src]\n arktrades get ARK trades for ticker[/cmds]\n ", "language": "en", "n_whitespaces": 157, "n_words": 63, "vocab_size": 50 }
10
Python
10
82747072c511beb1b2672846ae2ee4aec53eb562
dd_controller.py
281,541
22
22
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
31
0
83,839
9
5
13
def get_preprocess_function(self, field, value, export_format):
    # Try to find a field specific function and return it
    format_dict = self.custom_field_preprocess.get(field, {})
    if export_format in format_dict:
        return format_dict[export_format]

    # Otherwise check for a value class specific function
    for value_classes, format_dict in self.custom_value_preprocess.items():
        if isinstance(value, value_classes) and export_format in format_dict:
            return format_dict[export_format]

    # Finally resort to force_str to prevent encoding errors
    return force_str
wagtail/admin/views/mixins.py
105
wagtail
{ "docstring": "Returns the preprocessing function for a given field name, field value, and export format", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
60
Python
40
d10f15e55806c6944827d801cd9c2d53f5da4186
mixins.py
72,424
8
67
get_preprocess_function
https://github.com/wagtail/wagtail.git
Reformat with black
153
0
15,891
10
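The lookup order described above (field-specific formatter, then value-class formatter, then string fallback) can be illustrated with a self-contained sketch. The field names, formatters, and the use of plain str in place of Django's force_str are assumptions made for the example.

# Self-contained sketch of the field / value-class -> formatter lookup pattern.
import datetime

custom_field_preprocess = {
    "status": {"csv": lambda v: v.upper()},
}
custom_value_preprocess = {
    (datetime.date, datetime.datetime): {"csv": lambda v: v.isoformat()},
}

def get_preprocess_function(field, value, export_format):
    # 1. a formatter registered for this exact field wins
    format_dict = custom_field_preprocess.get(field, {})
    if export_format in format_dict:
        return format_dict[export_format]
    # 2. otherwise fall back to a formatter registered for the value's type
    for value_classes, format_dict in custom_value_preprocess.items():
        if isinstance(value, value_classes) and export_format in format_dict:
            return format_dict[export_format]
    # 3. final fallback: plain string conversion (stand-in for force_str)
    return str

fn = get_preprocess_function("created_at", datetime.date(2024, 1, 1), "csv")
print(fn(datetime.date(2024, 1, 1)))   # -> "2024-01-01"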
3
11
def get_corrected_cpu(cpu_count):  # formerly get_cpu_capacity
    from django.conf import settings

    settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
    env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)

    if env_abscpu is not None:
        return convert_cpu_str_to_decimal_cpu(env_abscpu)
    elif settings_abscpu is not None:
        return convert_cpu_str_to_decimal_cpu(settings_abscpu)

    return cpu_count  # no correction
awx/main/utils/common.py
94
awx
{ "docstring": "Some environments will do a correction to the reported CPU number\n because the given OpenShift value is a lie\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 17 }
37
Python
27
799968460d4794bcd9959f57a2b97846b9a00bb7
common.py
80,659
9
56
get_corrected_cpu
https://github.com/ansible/awx.git
Fixup conversion of memory and cpu settings to support k8s resource request format (#11725) fix memory and cpu settings to suport k8s resource request format * fix conversion of memory setting to bytes This setting has not been getting set by default, and needed some fixing up to be compatible with setting the memory in the same way as we set it in the operator, as well as with other changes from last year which assume that ansible runner is returning memory in bytes. This way we can start setting this setting in the operator, and get a more accurate reflection of how much memory is available to the control pod in k8s. On platforms where services are all sharing memory, we deduct a penalty from the memory available. On k8s we don't need to do this because the web, redis, and task containers each have memory allocated to them. * Support CPU setting expressed in units used by k8s This setting has not been getting set by default, and needed some fixing up to be compatible with setting the CPU resource request/limits in the same way as we set it in the resource requests/limits. This way we can start setting this setting in the operator, and get a more accurate reflection of how much cpu is available to the control pod in k8s. Because cpu on k8s can be partial cores, migrate cpu field to decimal. k8s does not allow granularity of less than 100m (equivalent to 0.1 cores), so only store up to 1 decimal place. fix analytics to deal with decimal cpu need to use DjangoJSONEncoder when Decimal fields in data passed to json.dumps
74
0
17,088
10
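The commit message above describes converting k8s-style CPU strings such as "100m" into decimal cores with 0.1-core granularity. The helper convert_cpu_str_to_decimal_cpu is not shown in this record, so the sketch below is only a guess at that conversion; the function name, rounding rule, and sample values are invented for illustration.

# Illustrative sketch only; not the actual AWX implementation.
from decimal import Decimal

def cpu_str_to_decimal(value):
    value = str(value).strip()
    if value.endswith("m"):                      # millicores, e.g. "1500m"
        cores = Decimal(value[:-1]) / Decimal(1000)
    else:                                        # plain cores, e.g. "2" or "1.5"
        cores = Decimal(value)
    return max(round(cores, 1), Decimal("0.1"))  # keep one decimal place, floor at 0.1

print(cpu_str_to_decimal("100m"))   # 0.1
print(cpu_str_to_decimal("2"))      # 2.0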
1
3
def clear(self): raise NotImplementedError
python/ray/air/execution/resources/resource_manager.py
16
ray
{ "docstring": "Reset internal state and clear all resources.\n\n Calling this method will reset the resource manager to its initialization state.\n All resources will be removed.\n\n Clearing the state will remove tracked resources from the manager, but there are\n no guarantees about the tasks and actors scheduled on the resources. The caller\n should make sure that any references to tasks or actors scheduled on the\n resources have been removed before calling ``clear()``.\n ", "language": "en", "n_whitespaces": 119, "n_words": 70, "vocab_size": 53 }
4
Python
4
edb17fd2069844f12237c85ba6607afae536401d
resource_manager.py
138,043
2
8
clear
https://github.com/ray-project/ray.git
[air/tune] Internal resource management 1 - Ray AIR resource manager implementation (#30777) Prerequisite to #30016 This PR adds a new Ray AIR resource manager to replace the PlacementGroupManager of Ray Tune. Details can be found in #30016. Specifically, this PR - Adds the main resource manager abstractions - Renames (and moves) PlacementGroupFactory to ResourceRequest - Adds implementations and tests for a placement group based manager and a budget based manager Signed-off-by: Kai Fricke <[email protected]> Signed-off-by: Kai Fricke <[email protected]> Co-authored-by: matthewdeng <[email protected]>
18
0
31,286
6
2
4
def _supports_universal_builds():
    # As an approximation, we assume that if we are running on 10.4 or above,
    # then we are running with an Xcode environment that supports universal
    # builds, in particular -isysroot and -arch arguments to the compiler. This
    # is in support of allowing 10.4 universal builds to run on 10.3.x systems.
    osx_version = _get_system_version_tuple()

    return bool(osx_version >= (10, 4)) if osx_version else False
python3.10.4/Lib/_osx_support.py
46
XX-Net
{ "docstring": "Returns True if universal builds are supported on this system", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
67
Python
51
8198943edd73a363c266633e1aa5b2a9e9c9f526
_osx_support.py
219,598
3
25
_supports_universal_builds
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
88
0
55,636
10
7
18
def symmetric_poly(n, *gens, **args):
    # TODO: use an explicit keyword argument
    gens = _analyze_gens(gens)

    if n < 0 or n > len(gens) or not gens:
        raise ValueError("Cannot generate symmetric polynomial of order %s for %s" % (n, gens))
    elif not n:
        poly = S.One
    else:
        poly = Add(*[Mul(*s) for s in subsets(gens, int(n))])

    if not args.get('polys', False):
        return poly
    else:
        return Poly(poly, *gens)

@public
sympy/polys/specialpolys.py
174
@public
sympy
{ "docstring": "Generates symmetric polynomial of order `n`.\n\n Returns a Poly object when ``polys=True``, otherwise\n (default) returns an expression.\n ", "language": "en", "n_whitespaces": 26, "n_words": 17, "vocab_size": 17 }
64
Python
52
337e5c51b1ae7e202b7d7c62107fab6d5ea58d93
specialpolys.py
195,838
12
103
symmetric_poly
https://github.com/sympy/sympy.git
Removed even more Python 2-support
122
1
47,432
18
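A small usage sketch for the function above, using the public SymPy API; the generators and printed outputs are chosen for illustration.

# Elementary symmetric polynomials in three generators.
from sympy import symbols
from sympy.polys.specialpolys import symmetric_poly

x, y, z = symbols("x y z")
print(symmetric_poly(1, x, y, z))   # x + y + z
print(symmetric_poly(2, x, y, z))   # x*y + x*z + y*z
print(symmetric_poly(3, x, y, z))   # x*y*z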
1
23
def test_ensure_print_span_characteristics_wont_fail():
    nlp = English()
    spans_key = "sc"

    pred = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."])
    pred.spans[spans_key] = [Span(pred, 3, 6, "ORG"), Span(pred, 5, 6, "GPE")]
    ref = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."])
    ref.spans[spans_key] = [Span(ref, 3, 6, "ORG"), Span(ref, 5, 6, "GPE")]
    eg = Example(pred, ref)
    examples = [eg]

    data = _compile_gold(examples, ["spancat"], nlp, True)
    span_characteristics = _get_span_characteristics(
        examples=examples, compiled_gold=data, spans_key=spans_key
    )
    _print_span_characteristics(span_characteristics)

@pytest.mark.parametrize("threshold", [70, 80, 85, 90, 95])
spacy/tests/test_cli.py
309
@pytest.mark.parametrize("threshold", [70, 80, 85, 90, 95])
spaCy
{ "docstring": "Test if interface between two methods aren't destroyed if refactored", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
75
Python
51
1d34aa2b3dd1ba0931dcb1863dfbeba6ae5b912d
test_cli.py
111,324
14
172
test_ensure_print_span_characteristics_wont_fail
https://github.com/explosion/spaCy.git
Add spacy-span-analyzer to debug data (#10668) * Rename to spans_key for consistency * Implement spans length in debug data * Implement how span bounds and spans are obtained In this commit, I implemented how span boundaries (the tokens) around a given span and spans are obtained. I've put them in the compile_gold() function so that it's accessible later on. I will do the actual computation of the span and boundary distinctiveness in the main function above. * Compute for p_spans and p_bounds * Add computation for SD and BD * Fix mypy issues * Add weighted average computation * Fix compile_gold conditional logic * Add test for frequency distribution computation * Add tests for kl-divergence computation * Fix weighted average computation * Make tables more compact by rounding them * Add more descriptive checks for spans * Modularize span computation methods In this commit, I added the _get_span_characteristics and _print_span_characteristics functions so that they can be reusable anywhere. * Remove unnecessary arguments and make fxs more compact * Update a few parameter arguments * Add tests for print_span and get_span methods * Update API to talk about span characteristics in brief * Add better reporting of spans_length * Add test for span length reporting * Update formatting of span length report Removed '' to indicate that it's not a string, then sort the n-grams by their length, not by their frequency. * Apply suggestions from code review Co-authored-by: Adriane Boyd <[email protected]> * Show all frequency distribution when -V In this commit, I displayed the full frequency distribution of the span lengths when --verbose is passed. To make things simpler, I rewrote some of the formatter functions so that I can call them whenever. Another notable change is that instead of showing percentages as Integers, I showed them as floats (max 2-decimal places). I did this because it looks weird when it displays (0%). * Update logic on how total is computed The way the 90% thresholding is computed now is that we keep adding the percentages until we reach >= 90%. I also updated the wording and used the term "At least" to denote that >= 90% of your spans have these distributions. * Fix display when showing the threshold percentage * Apply suggestions from code review Co-authored-by: Adriane Boyd <[email protected]> * Add better phrasing for span information * Update spacy/cli/debug_data.py Co-authored-by: Adriane Boyd <[email protected]> * Add minor edits for whitespaces etc. Co-authored-by: Adriane Boyd <[email protected]> Co-authored-by: Adriane Boyd <[email protected]>
120
1
24,375
11
4
14
def find_path_to_setup_from_repo_root(location, repo_root):
    # type: (str, str) -> Optional[str]
    # find setup.py
    orig_location = location
    while not os.path.exists(os.path.join(location, 'setup.py')):
        last_location = location
        location = os.path.dirname(location)
        if location == last_location:
            # We've traversed up to the root of the filesystem without
            # finding setup.py
            logger.warning(
                "Could not find setup.py for directory %s (tried all "
                "parent directories)",
                orig_location,
            )
            return None

    if os.path.samefile(repo_root, location):
        return None

    return os.path.relpath(location, repo_root)
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
145
transferlearning
{ "docstring": "\n Find the path to `setup.py` by searching up the filesystem from `location`.\n Return the path to `setup.py` relative to `repo_root`.\n Return None if `setup.py` is in `repo_root` or cannot be found.\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 23 }
68
Python
51
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
versioncontrol.py
61,395
15
86
find_path_to_setup_from_repo_root
https://github.com/jindongwang/transferlearning.git
upd; format
217
0
12,543
13
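The docstring above describes an upward search for setup.py relative to a repository root. The sketch below reproduces that behaviour without importing pip internals; the temporary directory layout and helper name are invented for illustration.

# Standalone sketch of the upward search for setup.py.
import os
import tempfile

repo_root = tempfile.mkdtemp()
pkg_dir = os.path.join(repo_root, "src", "mypkg")
os.makedirs(pkg_dir)
open(os.path.join(repo_root, "src", "setup.py"), "w").close()

def relative_setup_dir(location, repo_root):
    while not os.path.exists(os.path.join(location, "setup.py")):
        parent = os.path.dirname(location)
        if parent == location:          # reached the filesystem root
            return None
        location = parent
    if os.path.samefile(repo_root, location):
        return None                     # setup.py sits directly in the repo root
    return os.path.relpath(location, repo_root)

print(relative_setup_dir(pkg_dir, repo_root))   # -> "src"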
1
2
def isomin(self): return self["isomin"]
packages/python/plotly/plotly/graph_objs/_isosurface.py
22
plotly.py
{ "docstring": "\n Sets the minimum boundary for iso-surface plot.\n\n The 'isomin' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 78, "n_words": 26, "vocab_size": 26 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_isosurface.py
227,305
2
11
isomin
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,978
7
7
4
def _configure_matplotlib(cls):
    rcParams["keymap.fullscreen"] = [k for k in rcParams["keymap.fullscreen"] if k != "f"]
    rcParams["keymap.save"] = [k for k in rcParams["keymap.save"] if k != "s"]
    rcParams["keymap.home"] = [k for k in rcParams["keymap.home"] if k != "r"]
    rcParams["figure.raise_window"] = False
scripts/train.py
123
faceswap
{ "docstring": " Remove `F`, 'S' and 'R' from their default bindings and stop Matplotlib from stealing\n focus ", "language": "en", "n_whitespaces": 23, "n_words": 15, "vocab_size": 13 }
38
Python
17
c8122bc499afba4fcb99030e42e08bfb8d3a75e1
train.py
101,053
5
69
_configure_matplotlib
https://github.com/deepfakes/faceswap.git
bugfix: Stop preview window from stealing focus
73
0
20,490
10
4
20
def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):
    if strategy is not None and mode == ModeKeys.PREDICT:
        total_batch_outs = []
        for i in range(len(model.outputs)):
            num_replicas = strategy.num_replicas_in_sync
            nested_outs = batch_outs[
                i * num_replicas : i * num_replicas + num_replicas
            ]
            total_batch_outs.append(
                concat_along_batch_dimension(tf.nest.flatten(nested_outs))
            )
        return total_batch_outs
    return batch_outs
keras/distribute/distributed_training_utils_v1.py
125
keras
{ "docstring": "Aggregates the per-replica batch-level outputs from a distributed step.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
44
Python
34
84afc5193d38057e2e2badf9c889ea87d80d8fbf
distributed_training_utils_v1.py
270,352
13
80
_per_replica_aggregate_batch
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
159
0
80,449
16
1
16
def test_get_bad_image(self):
    # Get
    response = self.client.get(
        reverse(
            "wagtailimages:generate_url", args=(self.image.id + 1, "fill-800x600")
        )
    )

    # Check response
    self.assertEqual(response.status_code, 404)
    self.assertEqual(response["Content-Type"], "application/json")

    # Check JSON
    self.assertJSONEqual(
        response.content.decode(),
        json.dumps(
            {
                "error": "Cannot find image.",
            }
        ),
    )
wagtail/images/tests/test_admin_views.py
137
wagtail
{ "docstring": "\n This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist\n ", "language": "en", "n_whitespaces": 37, "n_words": 22, "vocab_size": 21 }
36
Python
30
d10f15e55806c6944827d801cd9c2d53f5da4186
test_admin_views.py
75,173
16
79
test_get_bad_image
https://github.com/wagtail/wagtail.git
Reformat with black
225
0
16,373
15
4
18
def to_dict(self, is_png=False) -> MaskAlignmentsFileDict:
    assert self._mask is not None
    affine_matrix = self.affine_matrix.tolist() if is_png else self.affine_matrix
    retval = MaskAlignmentsFileDict(mask=self._mask,
                                    affine_matrix=affine_matrix,
                                    interpolator=self.interpolator,
                                    stored_size=self.stored_size,
                                    stored_centering=self.stored_centering)
    logger.trace({k: v if k != "mask" else type(v) for k, v in retval.items()})  # type: ignore
    return retval
lib/align/detected_face.py
149
faceswap
{ "docstring": " Convert the mask to a dictionary for saving to an alignments file\n\n Parameters\n ----------\n is_png: bool\n ``True`` if the dictionary is being created for storage in a png header otherwise\n ``False``. Default: ``False``\n\n Returns\n -------\n dict:\n The :class:`Mask` for saving to an alignments file. Contains the keys ``mask``,\n ``affine_matrix``, ``interpolator``, ``stored_size``, ``stored_centering``\n ", "language": "en", "n_whitespaces": 146, "n_words": 52, "vocab_size": 41 }
42
Python
37
5e73437be47f2410439a3c6716de96354e6a0c94
detected_face.py
101,226
24
97
to_dict
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
241
0
20,646
12
3
13
def getgeneratorlocals(generator):
    if not isgenerator(generator):
        raise TypeError("{!r} is not a Python generator".format(generator))

    frame = getattr(generator, "gi_frame", None)
    if frame is not None:
        return generator.gi_frame.f_locals
    else:
        return {}


# ------------------------------------------------ coroutine introspection

CORO_CREATED = 'CORO_CREATED'
CORO_RUNNING = 'CORO_RUNNING'
CORO_SUSPENDED = 'CORO_SUSPENDED'
CORO_CLOSED = 'CORO_CLOSED'
python3.10.4/Lib/inspect.py
115
XX-Net
{ "docstring": "\n Get the mapping of generator local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 22 }
43
Python
33
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,447
8
50
getgeneratorlocals
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
74
0
55,315
12
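Since getgeneratorlocals is part of the standard library inspect module, a brief usage example may help; the generator below is made up for illustration.

# Inspecting the live locals of a suspended generator.
import inspect

def countdown(n):
    total = 0
    while n > 0:
        total += n
        yield n
        n -= 1

gen = countdown(3)
next(gen)                                  # advance to the first yield
print(inspect.getgeneratorlocals(gen))     # {'n': 3, 'total': 3}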
1
8
async def setup(self) -> None:
    await self._update_gauges()
    self._clock.looping_call(
        run_as_background_process,
        5 * 60 * 1000,
        desc="common_usage_metrics_update_gauges",
        func=self._update_gauges,
    )
synapse/metrics/common_usage_metrics.py
65
synapse
{ "docstring": "Keep the gauges for common usage metrics up to date.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
17
Python
16
898fef2789c9b1a20ef53c7d588f536f51f0fe2f
common_usage_metrics.py
249,451
9
39
setup
https://github.com/matrix-org/synapse.git
Share some metrics between the Prometheus exporter and the phone home stats (#13671)
89
0
72,923
9
2
23
def query_trial(request):
    trial_id = request.GET.get("trial_id")
    trials = TrialRecord.objects.filter(trial_id=trial_id).order_by("-start_time")
    if len(trials) == 0:
        resp = "Unknown trial id %s.\n" % trial_id
    else:
        trial = trials[0]
        result = {
            "trial_id": trial.trial_id,
            "job_id": trial.job_id,
            "trial_status": trial.trial_status,
            "start_time": trial.start_time,
            "end_time": trial.end_time,
            "params": trial.params,
        }
        resp = json.dumps(result)
    return HttpResponse(resp, content_type="application/json;charset=utf-8")
python/ray/tune/automlboard/frontend/query.py
192
ray
{ "docstring": "Rest API to query the trial info, with the given trial_id.\n\n The url pattern should be like this:\n\n curl http://<server>:<port>/query_trial?trial_id=<trial_id>\n\n The response may be:\n\n {\n \"app_url\": \"None\",\n \"trial_status\": \"TERMINATED\",\n \"params\": {'a': 1, 'b': 2},\n \"job_id\": \"asynchyperband_test\",\n \"end_time\": \"2018-07-19 20:49:44\",\n \"start_time\": \"2018-07-19 20:49:40\",\n \"trial_id\": \"2067R2ZD\",\n }\n ", "language": "en", "n_whitespaces": 112, "n_words": 45, "vocab_size": 42 }
46
Python
38
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
query.py
132,073
17
111
query_trial
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
165
0
29,666
12
1
31
def test_transaction_outcome_accepted(self):
    manager = EventManager(
        make_event(
            transaction="wait",
            contexts={
                "trace": {
                    "parent_span_id": "bce14471e0e9654d",
                    "op": "foobar",
                    "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
                    "span_id": "bf5be759039ede9a",
                }
            },
            spans=[],
            timestamp=iso_format(before_now(minutes=5)),
            start_timestamp=iso_format(before_now(minutes=5)),
            type="transaction",
            platform="python",
        )
    )
    manager.normalize()

    mock_track_outcome = mock.Mock()
    with mock.patch("sentry.event_manager.track_outcome", mock_track_outcome):
        with self.feature({"organizations:transaction-metrics-extraction": False}):
            manager.save(self.project.id)

    assert_mock_called_once_with_partial(
        mock_track_outcome, outcome=Outcome.ACCEPTED, category=DataCategory.TRANSACTION
    )
tests/sentry/event_manager/test_event_manager.py
244
sentry
{ "docstring": "\n Without metrics extraction, we count the number of accepted transaction\n events in the TRANSACTION data category. This maintains compatibility\n with Sentry installations that do not have a metrics pipeline.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 27 }
43
Python
39
abcccb3fe46fb8479687b77e8bce07dc5df13c90
test_event_manager.py
88,863
27
140
test_transaction_outcome_accepted
https://github.com/getsentry/sentry.git
fix(event_manager): Emit TRANSACTION outcomes if metrics are disabled (#41607) In #40507 we started to count transaction metrics in the `transaction` data category and transaction events in the `transaction_indexed` data category. That PR missed that metrics extraction can be disabled, in which case the old behavior of counting events as `transaction` should remain. Relay already implemented this logic since getsentry/relay#1537 based on the metrics extraction flag. This PR adds a feature check to the `organizations:transaction-metrics-extraction` feature, which is the same feature flag used to control Relay's behavior. We also remove the previously used option to sample a percentage of organizations into metrics extraction. The default for this feature remains off (`false`) until metrics components have been added to all deployment targets including self-hosted. Co-authored-by: Matej Minar <[email protected]>
408
0
18,459
16
5
19
def set_weights(self, weights):
    params = self.weights
    if len(params) != len(weights):
        raise ValueError(
            f"You called `set_weights(weights)` on optimizer {self._name} "
            f"with a weight list of length {str(len(weights))}, "
            f"but the optimizer was expecting {str(len(params))} "
            f"weights. Provided weights: {str(weights)[:50]}..."
        )
    if not params:
        return
    weight_value_tuples = []
    param_values = backend.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
        if pv.shape != w.shape:
            raise ValueError(
                f"Optimizer weight shape {str(pv.shape)} "
                "not compatible with "
                f"provided weight shape {str(w.shape)}."
            )
        weight_value_tuples.append((p, w))
    backend.batch_set_value(weight_value_tuples)
keras/optimizers/optimizer_v2/optimizer_v2.py
241
keras
{ "docstring": "Set the weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function takes the weight values associated with this\n optimizer as a list of Numpy arrays. The first value is always the\n iterations count of the optimizer, followed by the optimizer's state\n variables in the order they are created. The passed values are used to set\n the new state of the optimizer.\n\n For example, the RMSprop optimizer for this simple model takes a list of\n three values-- the iteration count, followed by the root-mean-square value\n of the kernel and bias of the single Dense layer:\n\n >>> opt = tf.keras.optimizers.RMSprop()\n >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n >>> m.compile(opt, loss='mse')\n >>> data = np.arange(100).reshape(5, 20)\n >>> labels = np.zeros(5)\n >>> results = m.fit(data, labels) # Training.\n >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])]\n >>> opt.set_weights(new_weights)\n >>> opt.iterations\n <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10>\n\n Args:\n weights: weight values as a list of numpy arrays.\n ", "language": "en", "n_whitespaces": 313, "n_words": 154, "vocab_size": 96 }
80
Python
63
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer_v2.py
275,511
22
103
set_weights
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
339
0
81,406
17
4
17
def delete(self) -> None:
    try:
        if hasattr(self.object, 'close'):
            self.object.close()
            self._logger.info(self.item.arguments)
            if self.item.arguments.get('identity'):
                self._logger.success(
                    f'{colored(self.item.arguments["identity"], "cyan")} is removed!'
                )
            else:
                self._logger.success('object is removed!')
        else:
            self._logger.warning(f'nothing to close. exiting')
    except Exception as e:
        self._logger.error(f'{e!r}')
        raise
    else:
        self.item = PartialStoreItem()
daemon/stores/partial.py
214
jina
{ "docstring": "Terminates the object in the store & stops the server", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
37
Python
33
933415bfa1f9eb89f935037014dfed816eb9815d
partial.py
9,815
19
105
delete
https://github.com/jina-ai/jina.git
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
275
0
1,707
20
2
9
def markInputline(self, markerString=">!<"):
    line_str = self.line
    line_column = self.column - 1
    if markerString:
        line_str = "".join((line_str[:line_column], markerString, line_str[line_column:]))
    return line_str.strip()
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
88
transferlearning
{ "docstring": "Extracts the exception line from the input string, and marks\n the location of the exception with a special symbol.\n ", "language": "en", "n_whitespaces": 36, "n_words": 19, "vocab_size": 15 }
20
Python
17
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,453
7
53
markInputline
https://github.com/jindongwang/transferlearning.git
upd; format
97
0
13,311
13
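markInputline is exposed on pyparsing's ParseException, so a usage sketch is possible; the grammar and failing input below are assumptions chosen for illustration.

# Marking the failure position in the offending input line.
import pyparsing as pp

integer = pp.Word(pp.nums)
grammar = integer + pp.Suppress(",") + integer

try:
    grammar.parseString("12,oops")
except pp.ParseException as err:
    # the marker string is inserted at the column where parsing failed
    print(err.markInputline())   # e.g. 12,>!<oops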
15
12
def convert_indexed_to_array(expr, first_indices=None):
    r
    result, indices = _convert_indexed_to_array(expr)

    if any(isinstance(i, (int, Integer)) for i in indices):
        result = ArrayElement(result, indices)
        indices = []

    if not first_indices:
        return result
sympy/tensor/array/expressions/conv_indexed_to_array.py
87
sympy
{ "docstring": "\n Parse indexed expression into a form useful for code generation.\n\n Examples\n ========\n\n >>> from sympy.tensor.array.expressions.conv_indexed_to_array import convert_indexed_to_array\n >>> from sympy import MatrixSymbol, Sum, symbols\n\n >>> i, j, k, d = symbols(\"i j k d\")\n >>> M = MatrixSymbol(\"M\", d, d)\n >>> N = MatrixSymbol(\"N\", d, d)\n\n Recognize the trace in summation form:\n\n >>> expr = Sum(M[i, i], (i, 0, d-1))\n >>> convert_indexed_to_array(expr)\n ArrayContraction(M, (0, 1))\n\n Recognize the extraction of the diagonal by using the same index `i` on\n both axes of the matrix:\n\n >>> expr = M[i, i]\n >>> convert_indexed_to_array(expr)\n ArrayDiagonal(M, (0, 1))\n\n This function can help perform the transformation expressed in two\n different mathematical notations as:\n\n `\\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \\Longrightarrow \\mathbf{A}\\cdot \\mathbf{B}`\n\n Recognize the matrix multiplication in summation form:\n\n >>> expr = Sum(M[i, j]*N[j, k], (j, 0, d-1))\n >>> convert_indexed_to_array(expr)\n ArrayContraction(ArrayTensorProduct(M, N), (1, 2))\n\n Specify that ``k`` has to be the starting index:\n\n >>> convert_indexed_to_array(expr, first_indices=[k])\n ArrayContraction(ArrayTensorProduct(N, M), (0, 3))\n ", "language": "en", "n_whitespaces": 236, "n_words": 151, "vocab_size": 107 }
28
Python
23
0aabd1d7b8c3cb521f713ea925a0bf019ba1f3ca
conv_indexed_to_array.py
196,010
62
191
convert_indexed_to_array
https://github.com/sympy/sympy.git
Extend conversion function of indexed expression to arrays to support broadcasting and addition of different indices
60
0
47,511
10
7
22
def is_symbolic_tensor(tensor):
    if isinstance(tensor, tf.Tensor):
        return hasattr(tensor, "graph")
    elif is_extension_type(tensor):
        component_tensors = tf.nest.flatten(tensor, expand_composites=True)
        return any(hasattr(t, "graph") for t in component_tensors)
    elif isinstance(tensor, tf.Variable):
        # Variables that are output of a Keras Layer in Functional API mode
        # should be considered symbolic.
        # TODO(omalleyt): We need a better way to check this in order to
        # enable `run_eagerly=True` for Models containing Layers that
        # return Variables as outputs.
        return (
            getattr(tensor, "_keras_history", False) or not tf.executing_eagerly()
        )
    elif isinstance(tensor, tuple(_user_convertible_tensor_types)):
        tensor = ops.convert_to_tensor_or_composite(tensor)
        return is_symbolic_tensor(tensor)
    else:
        return False


@keras_export("keras.__internal__.utils.register_symbolic_tensor_type", v1=[])
keras/utils/tf_utils.py
205
@keras_export("keras.__internal__.utils.register_symbolic_tensor_type", v1=[])
keras
{ "docstring": "Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.\n\n A Variable can be seen as either: it is considered symbolic\n when we are in a graph scope, and eager when we are in an eager scope.\n\n Args:\n tensor: A tensor instance to test.\n\n Returns:\n True for symbolic tensors, False for eager tensors.\n ", "language": "en", "n_whitespaces": 82, "n_words": 57, "vocab_size": 41 }
90
Python
68
84afc5193d38057e2e2badf9c889ea87d80d8fbf
tf_utils.py
277,089
16
113
is_symbolic_tensor
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
220
1
81,861
13
10
36
def _on_size(self, event): _log.debug("%s - _on_size()", type(self)) sz = self.GetParent().GetSizer() if sz: si = sz.GetItem(self) if sz and si and not si.Proportion and not si.Flag & wx.EXPAND: # managed by a sizer, but with a fixed size size = self.GetMinSize() else: # variable size size = self.GetClientSize() # Do not allow size to become smaller than MinSize size.IncTo(self.GetMinSize()) if getattr(self, "_width", None): if size == (self._width, self._height): # no change in size return self._width, self._height = size self._isDrawn = False if self._width <= 1 or self._height <= 1: return # Empty figure # Create a new, correctly sized bitmap self.bitmap = wx.Bitmap(self._width, self._height) dpival = self.figure.dpi winch = self._width / dpival hinch = self._height / dpival self.figure.set_size_inches(winch, hinch, forward=False) # Rendering will happen on the associated paint event # so no need to do anything here except to make sure # the whole background is repainted. self.Refresh(eraseBackground=False) ResizeEvent("resize_event", self)._process()
lib/matplotlib/backends/backend_wx.py
344
matplotlib
{ "docstring": "\n Called when wxEventSize is generated.\n\n In this application we attempt to resize to fit the window, so it\n is better to take the performance hit and redraw the whole window.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 25 }
149
Python
101
4e21912d2938b0e8812c4d1f7cd902c080062ff2
backend_wx.py
108,945
24
207
_on_size
https://github.com/matplotlib/matplotlib.git
Make it easier to improve UI event metadata. Currently, UI events (MouseEvent, KeyEvent, etc.) are generated by letting the GUI-specific backends massage the native event objects into a list of args/kwargs and then call `FigureCanvasBase.motion_notify_event`/`.key_press_event`/etc. This makes it a bit tricky to improve the metadata on the events, because one needs to change the signature on both the `FigureCanvasBase` method and the event class. Moreover, the `motion_notify_event`/etc. methods are directly bound as event handlers in the gtk3 and tk backends, and thus have incompatible signatures there. Instead, the native GUI handlers can directly construct the relevant event objects and trigger the events themselves; a new `Event._process` helper method makes this even shorter (and allows to keep factoring some common functionality e.g. for tracking the last pressed button or key). As an example, this PR also updates figure_leave_event to always correctly set the event location based on the *current* cursor position, instead of the last triggered location event (which may be outdated); this can now easily be done on a backend-by-backend basis, instead of coordinating the change with FigureCanvasBase.figure_leave_event. This also exposed another (minor) issue, in that resize events often trigger *two* calls to draw_idle -- one in the GUI-specific handler, and one in FigureCanvasBase.draw_idle (now moved to ResizeEvent._process, but should perhaps instead be a callback autoconnected to "resize_event") -- could probably be fixed later.
426
0
23,396
12
1
26
def test_subdag_pools_no_possible_conflict(self):
    dag = DAG('parent', default_args=default_args)
    subdag = DAG('parent.child', default_args=default_args)

    session = airflow.settings.Session()
    pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)
    pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)
    session.add(pool_1)
    session.add(pool_10)
    session.commit()

    EmptyOperator(task_id='dummy', dag=subdag, pool='test_pool_10')

    mock_session = Mock()
    SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_1', session=mock_session)
    assert not mock_session.query.called

    session.delete(pool_1)
    session.delete(pool_10)
    session.commit()
tests/operators/test_subdag_operator.py
250
airflow
{ "docstring": "\n Subdags and subdag tasks with no pool overlap, should not to query\n pools\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
41
Python
34
49e336ae0302b386a2f47269a6d13988382d975f
test_subdag_operator.py
47,649
16
149
test_subdag_pools_no_possible_conflict
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
153
0
9,190
10
24
35
def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):
    r
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
    elif None in G:
        raise nx.NetworkXError("None is not a valid node.")

    # Index pairs of interest for efficient lookup from either side.
    if pairs is not None:
        pair_dict = defaultdict(set)
        # See note on all_pairs_lowest_common_ancestor.
        if not isinstance(pairs, (Mapping, Set)):
            pairs = set(pairs)
        for u, v in pairs:
            for n in (u, v):
                if n not in G:
                    msg = f"The node {str(n)} is not in the digraph."
                    raise nx.NodeNotFound(msg)
            pair_dict[u].add(v)
            pair_dict[v].add(u)

    # If root is not specified, find the exactly one node with in degree 0 and
    # use it. Raise an error if none are found, or more than one is. Also check
    # for any nodes with in degree larger than 1, which would imply G is not a
    # tree.
    if root is None:
        for n, deg in G.in_degree:
            if deg == 0:
                if root is not None:
                    msg = "No root specified and tree has multiple sources."
                    raise nx.NetworkXError(msg)
                root = n
            elif deg > 1:
                msg = "Tree LCA only defined on trees; use DAG routine."
                raise nx.NetworkXError(msg)
    if root is None:
        raise nx.NetworkXError("Graph contains a cycle.")

    # Iterative implementation of Tarjan's offline lca algorithm
    # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition)
    uf = UnionFind()
    ancestors = {}
    for node in G:
        ancestors[node] = uf[node]

    colors = defaultdict(bool)
    for node in nx.dfs_postorder_nodes(G, root):
        colors[node] = True
        for v in pair_dict[node] if pairs is not None else G:
            if colors[v]:
                # If the user requested both directions of a pair, give it.
                # Otherwise, just give one.
                if pairs is not None and (node, v) in pairs:
                    yield (node, v), ancestors[uf[v]]
                if pairs is None or (v, node) in pairs:
                    yield (v, node), ancestors[uf[v]]
        if node != root:
            parent = arbitrary_element(G.pred[node])
            uf.union(parent, node)
            ancestors[uf[parent]] = parent


@not_implemented_for("undirected")
@not_implemented_for("multigraph")
networkx/algorithms/lowest_common_ancestors.py
573
@not_implemented_for("undirected") @not_implemented_for("multigraph")
networkx
{ "docstring": "Yield the lowest common ancestor for sets of pairs in a tree.\n\n Parameters\n ----------\n G : NetworkX directed graph (must be a tree)\n\n root : node, optional (default: None)\n The root of the subtree to operate on.\n If None, assume the entire graph has exactly one source and use that.\n\n pairs : iterable or iterator of pairs of nodes, optional (default: None)\n The pairs of interest. If None, Defaults to all pairs of nodes\n under `root` that have a lowest common ancestor.\n\n Returns\n -------\n lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes\n in `pairs` and `lca` is their lowest common ancestor.\n\n Examples\n --------\n >>> import pprint\n >>> G = nx.DiGraph([(1, 3), (2, 4), (1, 2)])\n >>> pprint.pprint(dict(nx.tree_all_pairs_lowest_common_ancestor(G)))\n {(1, 1): 1,\n (2, 1): 1,\n (2, 2): 2,\n (3, 1): 1,\n (3, 2): 1,\n (3, 3): 3,\n (3, 4): 1,\n (4, 1): 1,\n (4, 2): 2,\n (4, 4): 4}\n\n We can also use `pairs` argument to specify the pairs of nodes for which we\n want to compute lowest common ancestors. Here is an example:\n\n >>> dict(nx.tree_all_pairs_lowest_common_ancestor(G, pairs=[(1, 4), (2, 3)]))\n {(2, 3): 1, (1, 4): 1}\n\n Notes\n -----\n Only defined on non-null trees represented with directed edges from\n parents to children. Uses Tarjan's off-line lowest-common-ancestors\n algorithm. Runs in time $O(4 \\times (V + E + P))$ time, where 4 is the largest\n value of the inverse Ackermann function likely to ever come up in actual\n use, and $P$ is the number of pairs requested (or $V^2$ if all are needed).\n\n Tarjan, R. E. (1979), \"Applications of path compression on balanced trees\",\n Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161.\n\n See Also\n --------\n all_pairs_lowest_common_ancestor: similar routine for general DAGs\n lowest_common_ancestor: just a single pair for general DAGs\n ", "language": "en", "n_whitespaces": 457, "n_words": 290, "vocab_size": 186 }
314
Python
173
abaa68779ccb4cce8d1a5ecade622ab96d01edeb
lowest_common_ancestors.py
176,977
102
345
tree_all_pairs_lowest_common_ancestor
https://github.com/networkx/networkx.git
Add examples to lowest common ancestors algorithms (#5531) * Add examples to lowest common ancestors documentation * Fix output style of examples * Fix output style of example * Update pre-commit * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Indentation fix & pprint dictionary * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Move "import pprint" to the example Co-authored-by: dtuncturk <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
808
1
42,205
18
1
11
def view_transformation(E, R, V, roll):
    u, v, w = _view_axes(E, R, V, roll)
    M = _view_transformation_uvw(u, v, w, E)
    return M
lib/mpl_toolkits/mplot3d/proj3d.py
59
matplotlib
{ "docstring": "\n Return the view transformation matrix.\n\n Parameters\n ----------\n E : 3-element numpy array\n The coordinates of the eye/camera.\n R : 3-element numpy array\n The coordinates of the center of the view box.\n V : 3-element numpy array\n Unit vector in the direction of the vertical axis.\n roll : float\n The roll angle in radians.\n ", "language": "en", "n_whitespaces": 106, "n_words": 53, "vocab_size": 30 }
21
Python
16
4896ec1a2cfb8c454e385632d8df213c915ced52
proj3d.py
109,756
4
42
view_transformation
https://github.com/matplotlib/matplotlib.git
Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449) * ENH: Add pan and zoom toolbar handling to 3D Axes 1) This moves the pan logic that was already in the mouse move handler into the "drag_pan" method to make it available from the toolbar. 2) This expands upon the panning logic to enable a zoom-to-box feature. The zoom-to-box is done relative to the Axes, so it shrinks/expands the box as a fraction of each delta, from lower-left Axes to lower-left zoom-box. Thus, it tries to handle non-centered zooms, which adds more cases to handle versus the current right-click zoom only scaling from the center of the projection. * Rewrite zooming with bounding box * Rewrite 3d panning to work with a roll angle * Whats new for zoom and pan buttons * Make pan button configurable * Do not jump when zooming and mouse goes over other subplot * Rework zooming for 3d plots * Handle x/y lock when zooming and panning * Update tests * Docstrings * Dont assume a scale_z * Limit zoom box * Test zoom pan key modifiers * Save some calculation by saving view axes * Deprecation warnings for Axes3D.eye, .vvec * Remove Axes3D._prepare_view_from_bbox for now * Comments and docstrings * Switch from uvn to uvw * Save aspect to axes * Constrain zooming with mouse when one of the equal aspect ratios is set * Cleanup * Cleanup * Consolidate finding equal aspect axis indices * linting * More intuitive scaling * Box zoom keeps existing aspect ratios * Linting * Code review comments * Revert parameters for view_transformation * Fix new 3d pan/zoom view going on view stack twice * Better clipping * Test 3d toolbar navigation * Privatize helper functions * Deprecations * Code review changes * Deprecation note * Undeprecate proj3d.view_transformation * Undeprecate proj3d.view_transformation * Update doc/api/next_api_changes/deprecations/23449-SS.rst Co-authored-by: Greg Lucas <[email protected]> Co-authored-by: Scott Shambaugh <[email protected]> Co-authored-by: Oscar Gustafsson <[email protected]>
33
0
23,737
8
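A minimal usage sketch for the `view_transformation` record above; the camera vectors are made up for illustration, the import path is taken from the record's `path` field, and the four-argument signature (with `roll`) is assumed from the record itself:

import numpy as np
from mpl_toolkits.mplot3d import proj3d

# Hypothetical camera setup: eye position, view-box center, vertical axis, no roll.
E = np.array([10.0, 10.0, 10.0])   # eye/camera coordinates
R = np.array([0.0, 0.0, 0.0])      # center of the view box
V = np.array([0.0, 0.0, 1.0])      # unit vector for the vertical axis
M = proj3d.view_transformation(E, R, V, roll=0.0)
print(M)  # the resulting view matrix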
4
19
def _get_missing_alignments(self) -> Generator[str, None, None]:
    self.output_message = "Frames missing from alignments file"
    exclude_filetypes = set(["yaml", "yml", "p", "json", "txt"])
    for frame in tqdm(cast(Dict[str, str], self._items),
                      desc=self.output_message,
                      leave=False):
        frame_name = frame["frame_fullname"]
        if (frame["frame_extension"] not in exclude_filetypes
                and not self._alignments.frame_exists(frame_name)):
            logger.debug("Returning: '%s'", frame_name)
            yield frame_name
tools/alignments/jobs.py
169
faceswap
{ "docstring": " yield each frame that does not exist in alignments file\n\n Yields\n ------\n str\n The frame name of any frames missing alignments\n ", "language": "en", "n_whitespaces": 61, "n_words": 21, "vocab_size": 19 }
44
Python
38
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
jobs.py
101,717
18
103
_get_missing_alignments
https://github.com/deepfakes/faceswap.git
Alignments Tool - Typing, Documentation + Re-org
193
0
21,121
13
1
3
def rebalance_partitions(cls, partitions):
    return partitions
modin/core/dataframe/pandas/partitioning/partition_manager.py
18
modin
{ "docstring": "\n Return the provided array of partitions without rebalancing it.\n\n Parameters\n ----------\n partitions : np.ndarray\n The 2-d array of partitions to rebalance.\n\n Returns\n -------\n np.ndarray\n The same 2-d array.\n ", "language": "en", "n_whitespaces": 107, "n_words": 28, "vocab_size": 21 }
5
Python
5
8d1004fdbdaa05700613c8e6287641a732acf606
partition_manager.py
153,177
2
10
rebalance_partitions
https://github.com/modin-project/modin.git
FIX-#3675: Expand virtual partitioning utility (#3886) Co-authored-by: mvashishtha <[email protected]> Co-authored-by: jeffreykennethli <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Naren Krishna <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Doris Lee <[email protected]> Co-authored-by: Aditya Parameswaran <[email protected]> Co-authored-by: Rehan Sohail Durrani <[email protected]> Co-authored-by: Susmit Vengurlekar <[email protected]> Signed-off-by: Devin Petersohn <[email protected]>
19
0
35,280
6
2
8
def _handle_deprecations(self) -> None:
    if self._args.distributed:
        deprecation_warning("`-d`, `--distributed`",
                            "Please use `-D`, `--distribution-strategy`")
        logger.warning("Setting 'distribution-strategy' to 'mirrored'")
        setattr(self._args, "distribution_strategy", "mirrored")
        del self._args.distributed
scripts/train.py
79
faceswap
{ "docstring": " Handle the update of deprecated arguments and output warnings. ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
21
Python
21
2ea05623bd684b2d1dd75679ad00441a5c751e7e
train.py
101,076
8
43
_handle_deprecations
https://github.com/deepfakes/faceswap.git
Update Distibution Strategies: - Add Central Storage Stategy - Deprecate 'distributed' cli argument
110
0
20,513
10
3
33
def to_qa_preds(self, top_preds, no_ans_gaps, baskets):
    ret = []

    # Iterate over each set of document level prediction
    for pred_d, no_ans_gap, basket in zip(top_preds, no_ans_gaps, baskets):

        # Unpack document offsets, clear text and id
        token_offsets = basket.raw["document_offsets"]
        pred_id = basket.id_external if basket.id_external else basket.id_internal

        # These options reflect the different input dicts that can be assigned to the basket
        # before any kind of normalization or preprocessing can happen
        question_names = ["question_text", "qas", "questions"]
        doc_names = ["document_text", "context", "text"]

        document_text = try_get(doc_names, basket.raw)
        question = self.get_question(question_names, basket.raw)
        ground_truth = self.get_ground_truth(basket)

        curr_doc_pred = QAPred(
            id=pred_id,
            prediction=pred_d,
            context=document_text,
            question=question,
            token_offsets=token_offsets,
            context_window_size=self.context_window_size,
            aggregation_level="document",
            ground_truth_answer=ground_truth,
            no_answer_gap=no_ans_gap,
        )

        ret.append(curr_doc_pred)
    return ret
haystack/modeling/model/prediction_head.py
238
haystack
{ "docstring": "\n Groups Span objects together in a QAPred object\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
105
Python
84
a59bca366174d9c692fa19750c24d65f47660ef7
prediction_head.py
256,248
23
152
to_qa_preds
https://github.com/deepset-ai/haystack.git
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
418
0
74,828
12
2
9
def _deconstruct_messages(snuba_messages):
    return [
        (json.loads(msg.payload.value.decode("utf-8")), msg.payload.headers)
        for msg in snuba_messages
    ]
tests/sentry/sentry_metrics/test_batch.py
59
sentry
{ "docstring": "\n Convert a list of messages returned by `reconstruct_messages` into python\n primitives, to run assertions on:\n\n assert _deconstruct_messages(batch.reconstruct_messages(...)) == [ ... ]\n\n This is slightly nicer to work with than:\n\n assert batch.reconstruct_messages(...) == _construct_messages([ ... ])\n\n ...because pytest's assertion diffs work better with python primitives.\n ", "language": "en", "n_whitespaces": 74, "n_words": 44, "vocab_size": 37 }
11
Python
11
f31b57cbc5ec359c8ef9c6459d3d9d8ffcd6e8d9
test_batch.py
93,937
5
36
_deconstruct_messages
https://github.com/getsentry/sentry.git
ref(metrics_indexer): Improve typing, introduce more dataclasses, fix org_id namespacing bug in metadata [INGEST-1380] (#37170)
34
0
19,028
13
3
9
def from_key_val_list(value):
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    return OrderedDict(value)
pipenv/patched/pip/_vendor/requests/utils.py
63
pipenv
{ "docstring": "Take an object and test to see if it can be represented as a\n dictionary. Unless it can not be represented as such, return an\n OrderedDict, e.g.,\n\n ::\n\n >>> from_key_val_list([('key', 'val')])\n OrderedDict([('key', 'val')])\n >>> from_key_val_list('string')\n Traceback (most recent call last):\n ...\n ValueError: cannot encode objects that are not 2-tuples\n >>> from_key_val_list({'key': 'val'})\n OrderedDict([('key', 'val')])\n\n :rtype: OrderedDict\n ", "language": "en", "n_whitespaces": 127, "n_words": 56, "vocab_size": 44 }
24
Python
22
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
utils.py
22,139
6
39
from_key_val_list
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
50
0
4,211
10
2
10
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, BloomModel):
        module.gradient_checkpointing = value


BLOOM_START_DOCSTRING = r

BLOOM_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.",
    BLOOM_START_DOCSTRING,
)
src/transformers/models/bloom/modeling_bloom.py
64
@add_start_docstrings( "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.", BLOOM_START_DOCSTRING, )
transformers
{ "docstring": "\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BloomConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`BloomTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 957, "n_words": 474, "vocab_size": 241 }
33
Python
30
ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b
modeling_bloom.py
31,156
3
24
_set_gradient_checkpointing
https://github.com/huggingface/transformers.git
BLOOM (#17474) * adding template * update model * model update * update conf for debug model * update conversion * update conversion script * update conversion script * fix missing keys check * add tests to test the tokenizer in the local machine * Change variable name * add tests on xnli dataset * add more description * add descriptions + clearer code * clearer code * adding new tests + skipping few tests because of env problems * change comment * add dtype on the configuration * add test embeddings * add hardcoded test * fix dtype issue * adding torch.float16 to config * adding more metrics (min, max, mean) * add sum * now the test passes with almost equal * add files for conversion - test passes on cpu gpu * add final changes * cleaning code * add new args in the docstring * fix one liner function * remove macros * remove forward attention * clean up init funtion * add comments on the issue * rm scale mask softmax * do make style * fix dtype in init * fixing for loop on att probs * fix style with black * fix style + doc error * fix and debug CI errors (docs + style) * some updates - change new operations - finally add scaled softmax - added new args in the config * make use cache working * add changes - save sharded models - final changes on the modeling script * add changes - comment on alibi - add TODO on seq length * test commit - added a text to test the commit Co-authored-by: thomasw21 <[email protected]> * final changes - attention mask change - generation works on BS176b Co-authored-by: thomasw21 <[email protected]> * changes - model + conversion * move to correct dir * put , * fex fixes * fix tokenizer autodoc * fix minor CI issues * fix minor CI issues * fix minor CI issues * fix style issue * fix minor import issues * fix few issues * remove def main on the test * add require torch * replace decorator with 'with' * fix style * change to bloom * add quick fix tokenizer * fix tokenizer file * fix tokenizer - merge tests - small fixes * fix import issue * add bloom to readme * fix consistency * Update docs/source/en/model_doc/bloom.mdx Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions from code review fix comment issues on file headers Co-authored-by: Sylvain Gugger <[email protected]> * fix doc issue * small fix - modeling test * some changes - refactor some code - taking into account reviews - more tests should pass - removed pruning tests * remove useless division * more tests should pass * more tests should pass * more tests should pass * let's try this one -add alibi offset - remove all permutes to make the grad operations work - finger crossed * refactor - refactor code - style changes - add new threshold for test * major changes - change BLOOM to Bloom - add quick doc on bloom.mdx - move embeddings test on modeling test * modify readme * small fixes * small fix - better threshold for a test * remove old test file from fetcher * fix small typo * major change - change BloomLMHead to BloomForCausalLM * remove onnx config * major changes - refactor the code - remove asserts - change tol for test * make style * small change * adding a slow test + commenting old ones for now * make style * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * make style * fix duplicates * cleaning comments on config * clean a bit conversion file * refacor a bit modeling file * refactor tokenizer file * fix tokenization test issue * fix tokenization issue #2 * fix tokenization issue second try * fix test issue * make style + add suggestions * 
change test fetcher * try this one - slow tests should pass - finger crossed * possible final changes * make style * try fix padding side issue * fix side * fix padding issue * fix ko-readme * fix config auto * cleaning modeling file * keep bloom in caps in ko * update config docs * remove pretraining_pp * remove model parallel * update config - add correct config files * fix duplicates * fix fetcher * fix refactor issue - remove divide function * try to remove alibi * small fixes - fix alibi - remove seq length - refactor a bit the code * put correct values - fix bos and eos token ids * fix attention mask loop Co-authored-by: thomasw21 <[email protected]> * small fixes: - remove skip bias add * small fixes - fix typo in readme - fix typos in config * small changes - remove a test - add reconstruction test - change config * small changes - change Scaled Softmax to BloomScaledSoftmax * small fixes - fix alibi dtype * major changes - removing explicit dtype when loading modules - fixing test args (torch_dtype=auto) - add dosctring * fix readmes * major changes - now bloom supports alibi shifting - refactor a bit the code - better test tolerance now * refactor a bit * refactor a bit * put correct name on test * change docstring * small changes - fix docstring modeling - fix test tolerance * fix small nit - take dtype from tensors in the conversion script * minor fix - fix mdx issue * minor fix - change config docstring * forward contrib credits from PR14084 * Apply suggestions from code review Co-authored-by: Stas Bekman <[email protected]> * apply modifications Co-authored-by: Stas Bekman <[email protected]> * resolve softmax upcast * Apply suggestions from code review Co-authored-by: Stas Bekman <[email protected]> * Update src/transformers/models/bloom/modeling_bloom.py Co-authored-by: Niklas Muennighoff <[email protected]> * final changes modeling Co-authored-by: Stas Bekman <[email protected]> * Merge commit 'd156898f3b9b2c990e5963f5030a7143d57921a2' * merge commit * Apply suggestions from code review Co-authored-by: Stas Bekman <[email protected]> * apply suggestions Apply suggestions from Stas comments Co-authored-by: Stas Bekman <[email protected]> * Fix gradient checkpointing Co-authored-by: Stas Bekman <[email protected]> * add slow but exact * add accelerate compatibility Co-authored-by: Nicolas Patry <[email protected]> * forward contrib credits Co-authored-by: thomasw21 <[email protected]> Co-authored-by: sgugger <[email protected]> Co-authored-by: patrickvonplaten <[email protected]> Co-authored-by: Niklas Muennighoff <[email protected]> Co-authored-by: LysandreJik <[email protected]> * Apply suggestions from code review Co-authored-by: Patrick von Platen <[email protected]> * fix torch device on tests * make style * Apply suggestions from code review Co-authored-by: Patrick von Platen <[email protected]> * fix nits Co-authored-by: patrickvonplaten<[email protected]> * remove final nits * fix doc - add more details on the doc - add links to checkpoints * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/bloom/modeling_bloom.py Co-authored-by: Sylvain Gugger <[email protected]> * apply suggestions Co-authored-by: sgugger <[email protected]> * put test torchscript to false * Update src/transformers/models/bloom/modeling_bloom.py Co-authored-by: justheuristic <[email protected]> * fix alibi - create alibi only once * add small doc * make quality * replace torch.nn * remove token type emb * fix fused op + output 
bias * add fused op - now can control fused operation from config * remove fused op * make quality * small changes - remove unsed args on config - removed bias gelu file - make the model torchscriptable - add torchscript slow tests * Update src/transformers/models/bloom/modeling_bloom.py * fix slow * make style * add accelerate support * add bloom to deepspeed tests * minor changes * Apply suggestions from code review Co-authored-by: Patrick von Platen <[email protected]> * minor change * slow tests pass * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Update docs/source/en/model_doc/bloom.mdx Co-authored-by: Sylvain Gugger <[email protected]> * minor changes: - change docstring - add link to paper Co-authored-by: Thomwolf <[email protected]> Co-authored-by: Thomas Wolf <[email protected]> Co-authored-by: thomasw21 <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: sIncerass <[email protected]> Co-authored-by: Stas Bekman <[email protected]> Co-authored-by: Niklas Muennighoff <[email protected]> Co-authored-by: Nicolas Patry <[email protected]> Co-authored-by: thomasw21 <[email protected]> Co-authored-by: sgugger <[email protected]> Co-authored-by: patrickvonplaten <[email protected]> Co-authored-by: LysandreJik <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: justheuristic <[email protected]> Co-authored-by: Stas Bekman <[email protected]>
52
1
5,691
9
3
15
def __getitem__(self, name):  # -> EntryPoint:
    if isinstance(name, int):
        warnings.warn(
            "Accessing entry points by index is deprecated. "
            "Cast to tuple if needed.",
            DeprecationWarning,
            stacklevel=2,
        )
        return super().__getitem__(name)
    try:
        return next(iter(self.select(name=name)))
    except StopIteration:
        raise KeyError(name)
python3.10.4/Lib/importlib/metadata/__init__.py
108
XX-Net
{ "docstring": "\n Get the EntryPoint in self matching name.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
35
Python
33
8198943edd73a363c266633e1aa5b2a9e9c9f526
__init__.py
218,271
13
64
__getitem__
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
179
0
55,236
14
1
7
def isgeneratorfunction(obj):
    return _inspect.isgeneratorfunction(
        tf.__internal__.decorator.unwrap(obj)[1]
    )
keras/utils/tf_inspect.py
42
keras
{ "docstring": "TFDecorator-aware replacement for inspect.isgeneratorfunction.", "language": "en", "n_whitespaces": 3, "n_words": 4, "vocab_size": 4 }
6
Python
6
84afc5193d38057e2e2badf9c889ea87d80d8fbf
tf_inspect.py
277,066
4
25
isgeneratorfunction
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
22
0
81,843
12
4
31
def _update_from_feed(self, feed_entry, last_update, last_update_successful):
    self._title = feed_entry.title
    # Convert distance if not metric system.
    if self._unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
        self._distance = round(
            DistanceConverter.convert(
                feed_entry.distance_to_home, LENGTH_KILOMETERS, LENGTH_MILES
            ),
            1,
        )
    else:
        self._distance = round(feed_entry.distance_to_home, 1)
    self._latitude = round(feed_entry.coordinates[0], 5)
    self._longitude = round(feed_entry.coordinates[1], 5)
    self._attribution = feed_entry.attribution
    self._alert_level = feed_entry.alert_level
    self._activity = feed_entry.activity
    self._hazards = feed_entry.hazards
    self._feed_last_update = dt.as_utc(last_update) if last_update else None
    self._feed_last_update_successful = (
        dt.as_utc(last_update_successful) if last_update_successful else None
    )
homeassistant/components/geonetnz_volcano/sensor.py
228
core
{ "docstring": "Update the internal state from the provided feed entry.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
70
Python
52
503434e538af4b708f01cee9ca20bfa8426cec94
sensor.py
288,948
21
150
_update_from_feed
https://github.com/home-assistant/core.git
Use DistanceConverter in components (#80182) * Use DistanceConverter in components * Adjust for METRIC_SYSTEM
276
0
88,097
13
7
17
def test_checking_core_page_fields_are_indexed(self):

    # first confirm that errors show as EventPage (in test models) has no Page.search_fields
    errors = [error for error in checks.run_checks() if error.id == 'wagtailsearch.W001']

    # should only ever get this warning on the sub-classes of the page model
    self.assertEqual([EventPage, SingleEventPage], [error.obj for error in errors])

    for error in errors:
        self.assertEqual(error.msg, 'Core Page fields missing in `search_fields`', )
        self.assertIn(
            'Page model search fields `search_fields = Page.search_fields + [...]`',
            error.hint)

    # second check that we get no errors when setting up the models correctly
    with patch_search_fields(EventPage, Page.search_fields + EventPage.search_fields):
        errors = [error for error in checks.run_checks() if error.id == 'wagtailsearch.W001']
        self.assertEqual([], errors)
wagtail/search/tests/test_indexed_class.py
185
wagtail
{ "docstring": "Run checks to ensure that when core page fields are missing we get a warning", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
103
Python
70
d964675ee8fcb7ea58681ac8869733a86d58e4ec
test_indexed_class.py
70,443
11
113
test_checking_core_page_fields_are_indexed
https://github.com/wagtail/wagtail.git
add check for correct search_fields on pages - fixes #4940
233
0
15,509
12
1
20
def test_mixed_string_bytes_categoricals():
    # data as unicode
    X = np.array([["b"], ["a"]], dtype="U")
    # predefined categories as bytes
    categories = [np.array(["b", "a"], dtype="S")]
    ohe = OneHotEncoder(categories=categories, sparse_output=False)

    msg = re.escape(
        "In column 0, the predefined categories have type 'bytes' which is incompatible"
        " with values of type 'str_'."
    )

    with pytest.raises(ValueError, match=msg):
        ohe.fit(X)


@pytest.mark.parametrize("missing_value", [np.nan, None])
sklearn/preprocessing/tests/test_encoders.py
175
@pytest.mark.parametrize("missing_value", [np.nan, None])
scikit-learn
{ "docstring": "Check that this mixture of predefined categories and X raises an error.\n\n Categories defined as bytes can not easily be compared to data that is\n a string.\n ", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 26 }
54
Python
44
ecb9a70e82d4ee352e2958c555536a395b53d2bd
test_encoders.py
261,789
10
82
test_mixed_string_bytes_categoricals
https://github.com/scikit-learn/scikit-learn.git
FIX Ensure dtype of categories is `object` for strings in `OneHotEncoder` (#25174) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
101
1
76,996
11
1
5
def execute():
    frappe.reload_doc("HR", "doctype", "Leave Allocation")
    frappe.reload_doc("HR", "doctype", "Leave Ledger Entry")
    frappe.db.sql(
    )

    frappe.db.sql(
    )
erpnext/patches/v13_0/set_company_in_leave_ledger_entry.py
78
erpnext
{ "docstring": "update `tabLeave Ledger Entry` as lle set company = (select company from `tabEmployee` where employee = lle.employee)update `tabLeave Allocation` as la set company = (select company from `tabEmployee` where employee = la.employee)", "language": "en", "n_whitespaces": 31, "n_words": 32, "vocab_size": 18 }
15
Python
10
494bd9ef78313436f0424b918f200dab8fc7c20b
set_company_in_leave_ledger_entry.py
66,793
9
40
execute
https://github.com/frappe/erpnext.git
style: format code with black
8
0
14,336
8
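For readability, here is the record's function with the two SQL strings from its `documentation` field put back into the empty `frappe.db.sql` calls; this is a reconstruction for illustration, not the stored `code` field:

def execute():
    frappe.reload_doc("HR", "doctype", "Leave Allocation")
    frappe.reload_doc("HR", "doctype", "Leave Ledger Entry")
    # Backfill the company on existing ledger entries from the employee record.
    frappe.db.sql(
        """update `tabLeave Ledger Entry` as lle
        set company = (select company from `tabEmployee` where employee = lle.employee)"""
    )
    # Do the same for existing leave allocations.
    frappe.db.sql(
        """update `tabLeave Allocation` as la
        set company = (select company from `tabEmployee` where employee = la.employee)"""
    )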
2
11
def collocations(self, num=20, window_size=2):

    collocation_strings = [
        w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size)
    ]
    print(tokenwrap(collocation_strings, separator="; "))
nltk/text.py
76
nltk
{ "docstring": "\n Print collocations derived from the text, ignoring stopwords.\n\n >>> from nltk.book import text4\n >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE\n United States; fellow citizens; years ago; four years; Federal\n Government; General Government; American people; Vice President; God\n bless; Chief Justice; one another; fellow Americans; Old World;\n Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian\n tribes; public debt; foreign nations\n\n\n :param num: The maximum number of collocations to print.\n :type num: int\n :param window_size: The number of tokens spanned by a collocation (default=2)\n :type window_size: int\n ", "language": "en", "n_whitespaces": 204, "n_words": 84, "vocab_size": 69 }
23
Python
20
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
text.py
42,549
5
47
collocations
https://github.com/nltk/nltk.git
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
62
0
7,611
11
3
10
def get_held_invoices(party_type, party):
    held_invoices = None

    if party_type == "Supplier":
        held_invoices = frappe.db.sql(
            "select name from `tabPurchase Invoice` where release_date IS NOT NULL and release_date > CURDATE()",
            as_dict=1,
        )
        held_invoices = set(d["name"] for d in held_invoices)

    return held_invoices
erpnext/accounts/utils.py
78
erpnext
{ "docstring": "\n\tReturns a list of names Purchase Invoices for the given party that are on hold\n\t", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
38
Python
32
494bd9ef78313436f0424b918f200dab8fc7c20b
utils.py
65,407
9
46
get_held_invoices
https://github.com/frappe/erpnext.git
style: format code with black
29
0
13,888
12
2
4
def is_reserved(self):
    return (self.network_address.is_reserved and
            self.broadcast_address.is_reserved)
python3.10.4/Lib/ipaddress.py
34
XX-Net
{ "docstring": "Test if the address is otherwise IETF reserved.\n\n Returns:\n A boolean, True if the address is within one of the\n reserved IPv6 Network ranges.\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 24, "vocab_size": 19 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
ipaddress.py
218,555
3
20
is_reserved
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
35
0
55,386
9
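A quick illustration of the property documented above, using the public standard-library `ipaddress` module; the sample networks below are arbitrary, and the exact reserved ranges come from the IETF registry baked into the module:

import ipaddress

net = ipaddress.ip_network("400::/8")  # falls inside the IETF reserved IPv6 space
print(net.is_reserved)  # expected: True

# An allocated global-unicast prefix should not be flagged as reserved.
print(ipaddress.ip_network("2001:4860::/32").is_reserved)  # expected: False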
2
13
def reshape(self, *newshape):
    new_total_size = functools.reduce(lambda x, y: x*y, newshape)
    if new_total_size != self._loop_size:
        raise ValueError("Invalid reshape parameters " + str(newshape))

    # there is no `.func` as this class does not subtype `Basic`:
    return type(self)(self._array, newshape)
sympy/tensor/array/dense_ndim_array.py
90
sympy
{ "docstring": "\n Returns MutableDenseNDimArray instance with new shape. Elements number\n must be suitable to new shape. The only argument of method sets\n new shape.\n\n Examples\n ========\n\n >>> from sympy import MutableDenseNDimArray\n >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))\n >>> a.shape\n (2, 3)\n >>> a\n [[1, 2, 3], [4, 5, 6]]\n >>> b = a.reshape(3, 2)\n >>> b.shape\n (3, 2)\n >>> b\n [[1, 2], [3, 4], [5, 6]]\n\n ", "language": "en", "n_whitespaces": 196, "n_words": 69, "vocab_size": 49 }
35
Python
33
645539ed9a65eec4a7bfc4571bdf2135cfb68cfb
dense_ndim_array.py
199,144
5
55
reshape
https://github.com/sympy/sympy.git
Fix bug in error message (cast tuple to str) ```python from sympy.abc import x, y, z from sympy import Array a2 = Array([[[x, y], [z, x*z]], [[1, x*y], [1/x, x/y]]]) a2.reshape(1) ``` Out: ```text TypeError: can only concatenate str (not "tuple") to str ``` This casts `newshape` to a string to the error message makes sense.
81
0
49,163
12
2
19
def _looks_like_red_hat_scheme() -> bool:
    from distutils.command.install import install
    from distutils.dist import Distribution

    cmd: Any = install(Distribution())
    cmd.finalize_options()
    return (
        cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
        and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
    )


@functools.lru_cache(maxsize=None)
pipenv/patched/notpip/_internal/locations/__init__.py
137
@functools.lru_cache(maxsize=None)
pipenv
{ "docstring": "Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.\n\n Red Hat's ``00251-change-user-install-location.patch`` changes the install\n command's ``prefix`` and ``exec_prefix`` to append ``\"/local\"``. This is\n (fortunately?) done quite unconditionally, so we create a default command\n object without any configuration to detect this.\n ", "language": "en", "n_whitespaces": 53, "n_words": 38, "vocab_size": 35 }
28
Python
25
7e33fcae4384563b4c927fd44318c29dd524a097
__init__.py
19,466
16
52
_looks_like_red_hat_scheme
https://github.com/pypa/pipenv.git
Vendor in pip 21.2.4 release (from pip 21.2.2 prior). (#5009) * Vendor in pip 21.2.4 release (from pip 21.2.2 prior). * Add news fragment for pip 21.2.4 vendor update. * Add potentially missing LICENSE files
62
1
2,983
13
2
5
def __call__(self, name=None):
    if name is not None:
        return self._setResultsName(name)
    else:
        return self.copy()
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
52
transferlearning
{ "docstring": "\n Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.\n\n If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be\n passed as ``True``.\n\n If ``name` is omitted, same as calling :class:`copy`.\n\n Example::\n\n # these are equivalent\n userdata = Word(alphas).setResultsName(\"name\") + Word(nums + \"-\").setResultsName(\"socsecno\")\n userdata = Word(alphas)(\"name\") + Word(nums + \"-\")(\"socsecno\")\n ", "language": "en", "n_whitespaces": 124, "n_words": 48, "vocab_size": 38 }
13
Python
12
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,355
5
31
__call__
https://github.com/jindongwang/transferlearning.git
upd; format
56
0
13,264
10
1
10
def _num_elements(losses):
    with backend.name_scope("num_elements") as scope:
        return tf.cast(tf.size(losses, name=scope), dtype=losses.dtype)
keras/utils/losses_utils.py
66
keras
{ "docstring": "Computes the number of elements in `losses` tensor.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
10
Python
10
84afc5193d38057e2e2badf9c889ea87d80d8fbf
losses_utils.py
276,975
3
38
_num_elements
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
23
0
81,809
12
1
4
def get_input_for_correctness_test(self, **kwargs):
    return get_correctness_test_inputs(**kwargs)
keras/distribute/keras_correctness_test_base.py
27
keras
{ "docstring": "Generates inputs that are dictionaries.\n\n We only provide a default implementation of this method here. If you need\n more customized way of providing input to your model, overwrite this method.\n\n Args:\n **kwargs: key word arguments about how to create the input dictionaries\n\n Returns:\n Three dictionaries representing the input for fit(), evaluate() and\n predict()\n ", "language": "en", "n_whitespaces": 115, "n_words": 53, "vocab_size": 46 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras_correctness_test_base.py
270,387
2
15
get_input_for_correctness_test
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
19
0
80,460
8
5
15
def __new__(cls, freq=None):
    if isinstance(freq, PeriodDtype):
        return freq
    elif freq is None:
        # empty constructor for pickle compat
        # -10_000 corresponds to PeriodDtypeCode.UNDEFINED
        u = PeriodDtypeBase.__new__(cls, -10_000)
        u._freq = None
        return u

    if not isinstance(freq, BaseOffset):
        freq = cls._parse_dtype_strict(freq)

    try:
        return cls._cache_dtypes[freq.freqstr]
    except KeyError:
        dtype_code = freq._period_dtype_code
        u = PeriodDtypeBase.__new__(cls, dtype_code)
        u._freq = freq
        cls._cache_dtypes[freq.freqstr] = u
        return u
pandas/core/dtypes/dtypes.py
169
pandas
{ "docstring": "\n Parameters\n ----------\n freq : frequency\n ", "language": "en", "n_whitespaces": 34, "n_words": 5, "vocab_size": 5 }
59
Python
37
c7010a7adec1c47a4642fa068544699fc8e1ea6a
dtypes.py
171,304
17
106
__new__
https://github.com/pandas-dev/pandas.git
STYLE enable pylint's redefined-outer-name (#49671) * fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py * fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py * fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py * fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py Co-authored-by: bishwas jha <[email protected]>
244
0
40,660
12
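A small sketch of how the cached dtype construction above looks from the public pandas API; the expected outputs in the comments are assumptions, not verified against a specific pandas version:

import pandas as pd

m1 = pd.PeriodDtype(freq="M")
m2 = pd.PeriodDtype(freq="M")

# Identical frequencies should resolve to the same cached dtype.
print(m1 == m2)   # expected: True
print(str(m1))    # expected: period[M]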
4
21
def actor_table(self, actor_id):
    self._check_connected()

    if actor_id is not None:
        actor_id = ray.ActorID(hex_to_binary(actor_id))
        actor_info = self.global_state_accessor.get_actor_info(actor_id)
        if actor_info is None:
            return {}
        else:
            actor_table_data = gcs_utils.ActorTableData.FromString(actor_info)
            return self._gen_actor_info(actor_table_data)
    else:
        actor_table = self.global_state_accessor.get_actor_table()
        results = {}
        for i in range(len(actor_table)):
            actor_table_data = gcs_utils.ActorTableData.FromString(actor_table[i])
            results[
                binary_to_hex(actor_table_data.actor_id)
            ] = self._gen_actor_info(actor_table_data)

        return results
python/ray/state.py
202
ray
{ "docstring": "Fetch and parse the actor table information for a single actor ID.\n\n Args:\n actor_id: A hex string of the actor ID to fetch information about.\n If this is None, then the actor table is fetched.\n\n Returns:\n Information from the actor table.\n ", "language": "en", "n_whitespaces": 99, "n_words": 41, "vocab_size": 31 }
48
Python
30
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
state.py
131,062
19
124
actor_table
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
273
0
29,463
15
1
2
def test_ragged_tensor_output(self):
keras/engine/compile_utils_test.py
13
keras
{ "docstring": "Ensure that ragged tensors can be passed as targets and predictions.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
2
Python
2
84afc5193d38057e2e2badf9c889ea87d80d8fbf
compile_utils_test.py
271,076
15
192
test_ragged_tensor_output
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
9
0
80,686
6
1
22
def test_xml_element(self):
    el = Element("tag")
    el.set("key", "value")
    el.text = "text"
    childA = Element("childA")
    childB = Element("childB")
    el.append(childA)
    el.append(childB)

    upper = transform(str.upper)
    newelem: Element = validate(xml_element(tag=upper, text=upper, attrib={upper: upper}), el)

    assert newelem is not el
    assert newelem.tag == "TAG"
    assert newelem.text == "TEXT"
    assert newelem.attrib == {"KEY": "VALUE"}
    assert newelem[0].tag == "childA"
    assert newelem[1].tag == "childB"
    assert newelem[0] is not childA
    assert newelem[1] is not childB

    with self.assertRaises(ValueError) as cm:
        validate(xml_element(tag="invalid"), el)
    assert_validationerror(cm.exception, )

    with self.assertRaises(ValueError) as cm:
        validate(xml_element(text="invalid"), el)
    assert_validationerror(cm.exception, )

    with self.assertRaises(ValueError) as cm:
        validate(xml_element(attrib={"key": "invalid"}), el)
    assert_validationerror(cm.exception, )
tests/test_api_validate.py
407
streamlink
{ "docstring": "\n ValidationError(XmlElementSchema):\n Unable to validate XML tag\n Context(equality):\n 'tag' does not equal 'invalid'\n \n ValidationError(XmlElementSchema):\n Unable to validate XML text\n Context(equality):\n 'text' does not equal 'invalid'\n \n ValidationError(XmlElementSchema):\n Unable to validate XML attributes\n Context(dict):\n Unable to validate value of key 'key'\n Context(equality):\n 'value' does not equal 'invalid'\n ", "language": "en", "n_whitespaces": 256, "n_words": 44, "vocab_size": 21 }
90
Python
52
3d44da082b3ba202b9d0557bfd8ce747a1d7960c
test_api_validate.py
187,159
44
235
test_xml_element
https://github.com/streamlink/streamlink.git
plugin.api.validate: implement ValidationError - Implement `ValidationError` - Inherit from `ValueError` to preserve backwards compatiblity - Allow collecting multiple errors (AnySchema) - Keep an error stack of parent `ValidationError`s or other exceptions - Format error stack when converting error to string - Raise `ValidationError` instead of `ValueError` - Add error contexts where it makes sense - Add schema names to error instances - Add and update tests
283
0
45,718
15
1
5
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
src/accelerate/test_utils/testing.py
44
accelerate
{ "docstring": "\n Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 15 }
11
Python
11
0c6bdc2c237ac071be99ac6f93ddfbc8bbcb8441
testing.py
337,934
2
23
require_fsdp
https://github.com/huggingface/accelerate.git
enhancements and fixes for FSDP and DeepSpeed (#532) * checkpointing enhancements and fixes for FSDP and DeepSpeed * resolving comments 1. Adding deprecation args and warnings in launcher for FSDP 2. Handling old configs to work with new launcher args wrt FSDP. 3. Reverting changes to public methods in `checkpointing.py` and handling it in `Accelerator` 4. Explicitly writing the defaults of various FSDP options in `dataclasses` for readability. * fixes 1. FSDP wrapped model being added to the `_models`. 2. Not passing the env variables when args are None. * resolving comments * adding FSDP for all the collective operations * adding deepspeed and fsdp tests 1. Removes mrpc datafiles and directly relies on HF datasets as it was throwing `file not found` error when running from within `tests` folder. Updating `moke_dataloaders` as a result. 2. adding `test_performance.py`, `test_memory.py` and `test_checkpointing.py` for multi-gpu FSDP and DeepSpeed tests * reverting `mocked_dataloader` changes * adding FSDP tests * data files revert * excluding fsdp tests from `tests_core` * try 2 * adding time delay to avoid `torchrun` from crashing at times leading which causing flaky behaviour * reducing the time of tests * fixes * fix * fixes and reduce time further * reduce time further and minor fixes * adding a deepspeed basic e2e test for single gpu setup
17
0
121,141
11
1
4
def cur_num_workers(self):
    # Factor like this for convenient re-use.
    return self._cur_num_workers(self.node_data_dict)
python/ray/autoscaler/batching_node_provider.py
28
ray
{ "docstring": "Returns dict mapping node type to the number of nodes of that type.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
11
Python
11
c51b0c9a5664e5c6df3d92f9093b56e61b48f514
batching_node_provider.py
136,556
2
15
cur_num_workers
https://github.com/ray-project/ray.git
[autoscaler][kuberay] Batching node provider (#29933) Implements the abstract subclass of NodeProvider proposed in https://docs.google.com/document/d/1JyQINBFirZw7YenA_14zize0R3hIII1_fnfQytIXTPo/ The goal is to simplify the autoscaler's interactions with external cluster managers like the KubeRay operator. A follow-up PR will implement KuberayNodeProvider as a subclass of the BatchingNodeProvider added here. Signed-off-by: Dmitri Gekhtman <[email protected]>
32
0
30,939
8
2
3
def generate_lscolors(self) -> str:
kittens/tui/dircolors.py
16
kitty
{ "docstring": " Output the database in the format used by the LS_COLORS environment variable. ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 10 }
4
Python
4
4a3ed628092fac5b2552c8554c0482c569d14323
dircolors.py
102,926
4
29
generate_lscolors
https://github.com/kovidgoyal/kitty.git
Refactor: More f-string for kittens
11
0
21,582
6
3
7
def get_keras_custom_objects():
    # pylint:disable=no-name-in-module,import-outside-toplevel
    if get_backend() == "amd" or get_tf_version() < 2.8:
        from keras.utils import get_custom_objects
    else:
        from keras.utils.generic_utils import get_custom_objects
    return get_custom_objects()
lib/utils.py
68
faceswap
{ "docstring": " Wrapper to obtain keras.utils.get_custom_objects from correct location depending on\n backend used and tensorflow version. ", "language": "en", "n_whitespaces": 18, "n_words": 14, "vocab_size": 14 }
23
Python
20
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
utils.py
100,363
6
40
get_keras_custom_objects
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
52
0
19,852
9
3
19
def __call__(self, data_tuple):
    # Metaupdate Step.
    print("Meta-Update Step")
    samples = data_tuple[0]
    adapt_metrics_dict = data_tuple[1]
    self.postprocess_metrics(
        adapt_metrics_dict, prefix="MAMLIter{}".format(self.step_counter)
    )

    # MAML Meta-update.
    fetches = None
    for i in range(self.maml_optimizer_steps):
        fetches = self.workers.local_worker().learn_on_batch(samples)
    learner_stats = get_learner_stats(fetches)

    # Update KLs.
rllib/agents/mbmpo/mbmpo.py
126
ray
{ "docstring": "Args:\n data_tuple (tuple): 1st element is samples collected from MAML\n Inner adaptation steps and 2nd element is accumulated metrics\n ", "language": "en", "n_whitespaces": 40, "n_words": 19, "vocab_size": 17 }
37
Python
30
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
mbmpo.py
133,770
34
242
__call__
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
143
0
30,102
13
6
20
def hmean(a, axis=0, dtype=None, *, weights=None):
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    elif dtype:
        # Must change the default dtype allowing array type
        if isinstance(a, np.ma.MaskedArray):
            a = np.ma.asarray(a, dtype=dtype)
        else:
            a = np.asarray(a, dtype=dtype)

    if np.all(a >= 0):
        # Harmonic mean only defined if greater than or equal to zero.
        if weights is not None:
            weights = np.asanyarray(weights, dtype=dtype)

        with np.errstate(divide='ignore'):
            return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)
    else:
        raise ValueError("Harmonic mean only defined if all elements greater "
                         "than or equal to zero")


ModeResult = namedtuple('ModeResult', ('mode', 'count'))
scipy/stats/_stats_py.py
265
scipy
{ "docstring": "Calculate the harmonic mean along the specified axis.\n\n That is: n / (1/x1 + 1/x2 + ... + 1/xn)\n\n Parameters\n ----------\n a : array_like\n Input array, masked array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the harmonic mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If `dtype` is not specified, it defaults to the\n dtype of `a`, unless `a` has an integer `dtype` with a precision less\n than that of the default platform integer. In that case, the default\n platform integer is used.\n weights : array_like, optional\n The weights array can either be 1-D (in which case its length must be\n the size of `a` along the given `axis`) or of the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n .. versionadded:: 1.9\n\n Returns\n -------\n hmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n gmean : Geometric mean\n\n Notes\n -----\n The harmonic mean is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n Use masked arrays to ignore any non-finite values in the input or that\n arise in the calculations such as Not a Number and infinity.\n\n References\n ----------\n .. [1] \"Weighted Harmonic Mean\", *Wikipedia*,\n https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean\n .. [2] Ferger, F., \"The nature and use of the harmonic mean\", Journal of\n the American Statistical Association, vol. 26, pp. 36-40, 1931\n\n Examples\n --------\n >>> from scipy.stats import hmean\n >>> hmean([1, 4])\n 1.6000000000000001\n >>> hmean([1, 2, 3, 4, 5, 6, 7])\n 2.6997245179063363\n\n ", "language": "en", "n_whitespaces": 516, "n_words": 302, "vocab_size": 188 }
93
Python
66
a1546047bc146bf3189fa905c3415475b0e47931
_stats_py.py
241,810
16
155
hmean
https://github.com/scipy/scipy.git
ENH: stats: add weights in harmonic mean (#15347) Co-authored-by: Pamphile Roy <[email protected]>
231
0
69,705
15
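To illustrate the `weights` argument this record adds (it requires a SciPy release that includes the change; the docstring says 1.9+), a short sketch with the arithmetic worked out in the comments:

from scipy.stats import hmean

# Unweighted harmonic mean of 1 and 4: 2 / (1/1 + 1/4) = 1.6
print(hmean([1, 4]))

# Weighting the value 4 three times as heavily:
# (1 + 3) / (1/1 + 3/4) = 4 / 1.75 ≈ 2.2857
print(hmean([1, 4], weights=[1, 3]))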
3
12
def ordinal(value):
    try:
        value = int(value)
    except (TypeError, ValueError):
        return value
    if value % 100 in (11, 12, 13):
        # Translators: Ordinal format for 11 (11th), 12 (12th), and 13 (13th).
        value = pgettext("ordinal 11, 12, 13", "{}th").format(value)
    else:
        templates = (
            # Translators: Ordinal format when value ends with 0, e.g. 80th.
            pgettext("ordinal 0", "{}th"),
            # Translators: Ordinal format when value ends with 1, e.g. 81st, except 11.
            pgettext("ordinal 1", "{}st"),
            # Translators: Ordinal format when value ends with 2, e.g. 82nd, except 12.
            pgettext("ordinal 2", "{}nd"),
            # Translators: Ordinal format when value ends with 3, e.g. 83th, except 13.
            pgettext("ordinal 3", "{}rd"),
            # Translators: Ordinal format when value ends with 4, e.g. 84th.
            pgettext("ordinal 4", "{}th"),
            # Translators: Ordinal format when value ends with 5, e.g. 85th.
            pgettext("ordinal 5", "{}th"),
            # Translators: Ordinal format when value ends with 6, e.g. 86th.
            pgettext("ordinal 6", "{}th"),
            # Translators: Ordinal format when value ends with 7, e.g. 87th.
            pgettext("ordinal 7", "{}th"),
            # Translators: Ordinal format when value ends with 8, e.g. 88th.
            pgettext("ordinal 8", "{}th"),
            # Translators: Ordinal format when value ends with 9, e.g. 89th.
            pgettext("ordinal 9", "{}th"),
        )
        value = templates[value % 10].format(value)
    # Mark value safe so i18n does not break with <sup> or <sub> see #19988
    return mark_safe(value)


@register.filter(is_safe=True)
django/contrib/humanize/templatetags/humanize.py
278
@register.filter(is_safe=True)
django
{ "docstring": "\n Convert an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',\n 3 is '3rd', etc. Works for any integer.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 21 }
212
Python
94
9c19aff7c7561e3a82978a272ecdaad40dda5c00
humanize.py
204,149
22
143
ordinal
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
501
1
50,649
13
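A quick behavioural sketch of the filter above, with values chosen to hit the 11/12/13 special case; this assumes a configured Django settings module, since the filter goes through the translation machinery:

from django.contrib.humanize.templatetags.humanize import ordinal

for n in (1, 2, 3, 4, 11, 12, 13, 21, 102, 113):
    print(n, ordinal(n))
# Expected: 1st, 2nd, 3rd, 4th, 11th, 12th, 13th, 21st, 102nd, 113th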
1
11
def get_granger_causality(dependent_series, independent_series, lags):
    granger_set = pd.concat([dependent_series, independent_series], axis=1)

    granger = grangercausalitytests(granger_set, [lags], verbose=False)

    return granger
openbb_terminal/econometrics/econometrics_model.py
63
OpenBBTerminal
{ "docstring": "Calculate granger tests\n\n Parameters\n ----------\n dependent_series: Series\n The series you want to test Granger Causality for.\n independent_series: Series\n The series that you want to test whether it Granger-causes time_series_y\n lags : int\n The amount of lags for the Granger test. By default, this is set to 3.\n ", "language": "en", "n_whitespaces": 86, "n_words": 47, "vocab_size": 36 }
16
Python
14
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
econometrics_model.py
285,200
4
42
get_granger_causality
https://github.com/OpenBB-finance/OpenBBTerminal.git
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: hjoaquim <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: colin99d <[email protected]> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: James Simmons <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: jose-donato <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: northern-64bit <[email protected]> Co-authored-by: hjoaquim <[email protected]>
28
0
85,240
9
1
14
def fetch_buffered_group_stats(group):
    from sentry import buffer
    from sentry.models import Group

    result = buffer.get(Group, ["times_seen"], {"pk": group.id})
    group.times_seen_pending = result["times_seen"]


@instrumented_task(
    name="sentry.tasks.post_process.post_process_group",
    time_limit=120,
    soft_time_limit=110,
)
src/sentry/tasks/post_process.py
101
@instrumented_task(
    name="sentry.tasks.post_process.post_process_group",
    time_limit=120,
    soft_time_limit=110,
)
sentry
{ "docstring": "\n Fetches buffered increments to `times_seen` for this group and adds them to the current\n `times_seen`.\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 14 }
24
Python
21
09726d7fc95e53bb516e328fc1811fc9a0704cac
post_process.py
96,154
5
44
fetch_buffered_group_stats
https://github.com/getsentry/sentry.git
fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624) In `post_process_group` we process issue alert rules and also ignored groups. Both of these can have conditions that read from the `times_seen` value on the `Group`. The problem here is that updates to `times_seen` are buffered and only written every 45s or so. This means that most of the time when a `Group` goes through `post_process_group` it has an out of date `times_seen` value. For infrequently updated groups, this can just mean that the count is -1. But for high volume groups this could mean that we're considerably below the count. To improve this, we read the current value from buffers and store it as pending updates on the group. We then use this pending value when checking rules and snoozes in post process. There's a potential race condition here where we fetch the `Group`, and before we fetch the value from buffers it is cleared, and so we miss out on the update. This should be infrequent enough that it's not a problem, and either way we will be considerably more accurate most of the time.
46
1
19,285
11
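To see what the helper in this record buys its callers, here is a minimal, self-contained sketch of the buffered-counter idea it documents: the persisted times_seen column lags behind because increments are buffered, so a fresher pending value is attached to the object before any count-based checks run. FakeGroup, pending_increments, and fetch_pending_stats are invented names for illustration only; this does not use Sentry's buffer API.

# Illustrative sketch only; all names here are stand-ins, not Sentry APIs.
class FakeGroup:
    def __init__(self, pk, times_seen):
        self.id = pk
        self.times_seen = times_seen      # stale value last flushed to the database


pending_increments = {1: 37}              # buffered, not-yet-flushed increments keyed by group id


def fetch_pending_stats(group):
    # Mirror of the documented behaviour: add the buffered increments to the
    # persisted count and attach the result for downstream rule/snooze checks.
    group.times_seen_pending = group.times_seen + pending_increments.get(group.id, 0)


group = FakeGroup(pk=1, times_seen=5)
fetch_pending_stats(group)
assert group.times_seen_pending == 42     # 5 persisted + 37 buffered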
3
21
def mixin_base_ppr_parser(parser):
    mixin_essential_parser(parser)

    gp = add_arg_group(parser, title='Base Deployment')
    gp.add_argument(
        '--extra-search-paths',
        type=str,
        default=[],
        nargs='*',
        help='Extra search paths to be used when loading modules and finding YAML config files.'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    gp.add_argument(
        '--timeout-ctrl',
        type=int,
        default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
        help='The timeout in milliseconds of the control request, -1 for waiting forever',
    )
    parser.add_argument(
        '--k8s-namespace',
        type=str,
        help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    gp.add_argument(
        '--polling',
        type=str,
        default=PollingType.ANY.name,
        help=,  # the long polling help string was extracted into this record's `documentation` field
    )
jina/parsers/orchestrate/base.py
202
jina
{ "docstring": "Mixing in arguments required by pod/deployment/runtime module into the given parser.\n :param parser: the parser instance to which we add arguments\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`).\n Can be defined for all endpoints of a Deployment or by endpoint.\n Define per Deployment:\n - ANY: only one (whoever is idle) Pod polls the message\n - ALL: all Pods poll the message (like a broadcast)\n Define per Endpoint:\n JSON dict, {endpoint: PollingType}\n {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}\n \n ", "language": "en", "n_whitespaces": 119, "n_words": 81, "vocab_size": 66 }
80
Python
64
a3b71c7208b3cd48aa7bc978c3343a074947e3d9
base.py
12,207
41
123
mixin_base_ppr_parser
https://github.com/jina-ai/jina.git
fix(parsers): clearify flow args (#4701)
253
0
2,215
13
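To make the flag definitions in this record concrete, a short runnable sketch of the same argparse pattern follows: an argument group, an environment-backed default, and help text hidden behind a verbosity switch. It deliberately avoids jina's internal helpers (add_arg_group, mixin_essential_parser, PollingType), so every name below is local to the example and only approximates the real parser.

# Self-contained argparse sketch; SHOW_ALL_ARGS stands in for _SHOW_ALL_ARGS.
import argparse
import os

SHOW_ALL_ARGS = False

parser = argparse.ArgumentParser()
gp = parser.add_argument_group(title='Base Deployment')
gp.add_argument(
    '--timeout-ctrl',
    type=int,
    default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
    help='Timeout in milliseconds of the control request, -1 to wait forever',
)
gp.add_argument(
    '--extra-search-paths',
    type=str,
    default=[],
    nargs='*',
    help='Extra search paths for modules and YAML configs'
    if SHOW_ALL_ARGS
    else argparse.SUPPRESS,   # hidden from --help unless the flag above is set
)
gp.add_argument('--polling', type=str, default='ANY')

args = parser.parse_args(['--polling', 'ALL', '--timeout-ctrl', '100'])
print(args.polling, args.timeout_ctrl)  # -> ALL 100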
4
9
def preprocess(self, x):
    if self.type == "value":
        return x
    elif self.type == "index":
        return [self.choices.index(choice) for choice in x]
    else:
        raise ValueError(
            "Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'."
        )
gradio/inputs.py
96
gradio
{ "docstring": "\n Parameters:\n x (List[str]): list of selected choices\n Returns:\n (Union[List[str], List[int]]): list of selected choices as strings or indices within choice list\n ", "language": "en", "n_whitespaces": 57, "n_words": 21, "vocab_size": 16 }
35
Python
31
cc0cff893f9d7d472788adc2510c123967b384fe
inputs.py
179,244
11
55
preprocess
https://github.com/gradio-app/gradio.git
Format The Codebase - black formatting - isort formatting
152
0
42,924
15
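As a quick illustration of the value/index behaviour documented in this record, here is a self-contained stand-in class; its name and constructor are invented for the example and are not the real gradio component.

# Simplified stand-in for a choice-based input component; not gradio's class.
class CheckboxGroupLike:
    def __init__(self, choices, type="value"):
        self.choices = choices
        self.type = type

    def preprocess(self, x):
        if self.type == "value":
            return x                                    # pass the selected strings through
        elif self.type == "index":
            return [self.choices.index(c) for c in x]   # map each selection to its position
        else:
            raise ValueError(
                "Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'."
            )


cg = CheckboxGroupLike(choices=["red", "green", "blue"], type="index")
print(cg.preprocess(["blue", "red"]))   # -> [2, 0]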