Column summary (name, dtype, and observed minimum / maximum):

column           dtype           min      max
ast_errors       stringlengths   0        3.2k
d_id             int64           44       121k
id               int64           70       338k
n_whitespaces    int64           3        14k
path             stringlengths   8        134
n_words          int64           4        4.82k
n_identifiers    int64           1        131
random_cut       stringlengths   16       15.8k
commit_message   stringlengths   2        15.3k
fun_name         stringlengths   1        84
commit_id        stringlengths   40       40
repo             stringlengths   3        28
file_name        stringlengths   5        79
ast_levels       int64           6        31
nloc             int64           1        548
url              stringlengths   31       59
complexity       int64           1        66
token_counts     int64           6        2.13k
n_ast_errors     int64           0        28
vocab_size       int64           4        1.11k
n_ast_nodes      int64           15       19.2k
language         stringclasses   1 value (Python)
documentation    dict
code             stringlengths   101      62.2k
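The stringlengths / int64 / stringclasses summaries above read like a Hugging Face datasets viewer export, so a minimal sketch of loading and inspecting one record is shown below; the dataset identifier and the split name are placeholders I am assuming, not values recoverable from this dump.

# A sketch only: assumes the Hugging Face `datasets` library and a hypothetical
# dataset id; substitute the real repository name or local parquet/JSON files.
from datasets import load_dataset

ds = load_dataset("org/python-commit-functions", split="train")  # placeholder id and split

print(ds.features)   # should list the 24 columns summarized above
print(ds.num_rows)

row = ds[0]
print(row["repo"], row["path"], row["fun_name"], row["commit_id"])
print(row["documentation"]["docstring"])  # docstring text stored in the `documentation` dict
print(row["code"])                        # full function source for the record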
ast_errors: @not_implemented_for("directed") @not_implemented_for("multigraph")
d_id: 41,909 | id: 176,448 | n_whitespaces: 294 | path: networkx/algorithms/asteroidal.py | n_words: 105 | n_identifiers: 21
def find_asteroidal_triple(G): r V = set(G.nodes) if len(V) < 6: # An asteroidal triple cannot exist in a graph with 5 or less vertices. return None component_structure = create_component_structure(G) E_complement = set(nx.complement(G).edges) for e in E_complement: u = e[0] v = e[1] u_neighborhood = set(G[u]).union([u]) v_neighborhood = set(G[v]).union([v]) union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
Minor improvements from general code readthrough (#5414) * Add deprecated directive to reversed docstring. * Add missing dep directives to shpfiles. * Remove defn of INF sentinel. * typo. * str -> comment in forloop. * STY: appropriate casing for var name.
fun_name: find_asteroidal_triple | commit_id: cc1db275efc709cb964ce88abbfa877798d58c10 | repo: networkx | file_name: asteroidal.py | ast_levels: 15 | nloc: 61
url: https://github.com/networkx/networkx.git | complexity: 7 | token_counts: 169 | n_ast_errors: 1 | vocab_size: 80 | n_ast_nodes: 280 | language: Python
{ "docstring": "Find an asteroidal triple in the given graph.\n\n An asteroidal triple is a triple of non-adjacent vertices such that\n there exists a path between any two of them which avoids the closed\n neighborhood of the third. It checks all independent triples of vertices\n and whether they are an asteroidal triple or not. This is done with the\n help of a data structure called a component structure.\n A component structure encodes information about which vertices belongs to\n the same connected component when the closed neighborhood of a given vertex\n is removed from the graph. The algorithm used to check is the trivial\n one, outlined in [1]_, which has a runtime of\n :math:`O(|V||\\overline{E} + |V||E|)`, where the second term is the\n creation of the component structure.\n\n Parameters\n ----------\n G : NetworkX Graph\n The graph to check whether is AT-free or not\n\n Returns\n -------\n list or None\n An asteroidal triple is returned as a list of nodes. If no asteroidal\n triple exists, i.e. the graph is AT-free, then None is returned.\n The returned value depends on the certificate parameter. The default\n option is a bool which is True if the graph is AT-free, i.e. the\n given graph contains no asteroidal triples, and False otherwise, i.e.\n if the graph contains at least one asteroidal triple.\n\n Notes\n -----\n The component structure and the algorithm is described in [1]_. The current\n implementation implements the trivial algorithm for simple graphs.\n\n References\n ----------\n .. [1] Ekkehard Köhler,\n \"Recognizing Graphs without asteroidal triples\",\n Journal of Discrete Algorithms 2, pages 439-452, 2004.\n https://www.sciencedirect.com/science/article/pii/S157086670400019X\n ", "language": "en", "n_whitespaces": 395, "n_words": 253, "vocab_size": 145 }
def find_asteroidal_triple(G): r V = set(G.nodes) if len(V) < 6: # An asteroidal triple cannot exist in a graph with 5 or less vertices. return None component_structure = create_component_structure(G) E_complement = set(nx.complement(G).edges) for e in E_complement: u = e[0] v = e[1] u_neighborhood = set(G[u]).union([u]) v_neighborhood = set(G[v]).union([v]) union_of_neighborhoods = u_neighborhood.union(v_neighborhood) for w in V - union_of_neighborhoods: # Check for each pair of vertices whether they belong to the # same connected component when the closed neighborhood of the # third is removed. if ( component_structure[u][v] == component_structure[u][w] and component_structure[v][u] == component_structure[v][w] and component_structure[w][u] == component_structure[w][v] ): return [u, v, w] return None @not_implemented_for("directed") @not_implemented_for("multigraph")
ast_errors: @frappe.whitelist()
d_id: 14,114 | id: 66,155 | n_whitespaces: 12 | path: erpnext/hr/doctype/leave_application/leave_application.py | n_words: 19 | n_identifiers: 10
def get_leave_entries(employee, leave_type, from_date, to_date): return frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "employee": employee, "leav
style: format code with black
fun_name: get_leave_entries | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: leave_application.py | ast_levels: 10 | nloc: 18
url: https://github.com/frappe/erpnext.git | complexity: 1 | token_counts: 44 | n_ast_errors: 1 | vocab_size: 18 | n_ast_nodes: 83 | language: Python
{ "docstring": "Returns leave entries between from_date and to_date.\n\t\tSELECT\n\t\t\temployee, leave_type, from_date, to_date, leaves, transaction_name, transaction_type, holiday_list,\n\t\t\tis_carry_forward, is_expired\n\t\tFROM `tabLeave Ledger Entry`\n\t\tWHERE employee=%(employee)s AND leave_type=%(leave_type)s\n\t\t\tAND docstatus=1\n\t\t\tAND (leaves<0\n\t\t\t\tOR is_expired=1)\n\t\t\tAND (from_date between %(from_date)s AND %(to_date)s\n\t\t\t\tOR to_date between %(from_date)s AND %(to_date)s\n\t\t\t\tOR (from_date < %(from_date)s AND to_date > %(to_date)s))\n\t", "language": "en", "n_whitespaces": 40, "n_words": 52, "vocab_size": 37 }
def get_leave_entries(employee, leave_type, from_date, to_date): return frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "employee": employee, "leave_type": leave_type}, as_dict=1, ) @frappe.whitelist()
ast_errors: @register.filter(is_safe=True) @stringfilter
d_id: 51,427 | id: 206,236 | n_whitespaces: 13 | path: django/template/defaultfilters.py | n_words: 9 | n_identifiers: 7
def addslashes(value): return value.replace(
Refs #33476 -- Reformatted code with Black.
fun_name: addslashes | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: defaultfilters.py | ast_levels: 12 | nloc: 2
url: https://github.com/django/django.git | complexity: 1 | token_counts: 29 | n_ast_errors: 1 | vocab_size: 9 | n_ast_nodes: 81 | language: Python
{ "docstring": "\n Add slashes before quotes. Useful for escaping strings in CSV, for\n example. Less useful for escaping JavaScript; use the ``escapejs``\n filter instead.\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 19 }
def addslashes(value): return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'") @register.filter(is_safe=True) @stringfilter
d_id: 35,765 | id: 154,083 | n_whitespaces: 48 | path: modin/config/envvars.py | n_words: 16 | n_identifiers: 7
def _get_raw_from_config(cls) -> str:
REFACTOR-#4629: Add type annotations to `modin/config` (#4685) Signed-off-by: Karthik Velayutham <[email protected]>
fun_name: _get_raw_from_config | commit_id: 02363589aa5105e091fa3d790b29cddf94cc8118 | repo: modin | file_name: envvars.py | ast_levels: 10 | nloc: 19
url: https://github.com/modin-project/modin.git | complexity: 2 | token_counts: 29 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 50 | language: Python
{ "docstring": "\n Read the value from environment variable.\n\n Returns\n -------\n str\n Config raw value.\n\n Raises\n ------\n TypeError\n If `varname` is None.\n KeyError\n If value is absent.\n ", "language": "en", "n_whitespaces": 121, "n_words": 24, "vocab_size": 21 }
def _get_raw_from_config(cls) -> str: if cls.varname is None: raise TypeError("varname should not be None") return os.environ[cls.varname]
d_id: 35,636 | id: 153,821 | n_whitespaces: 18 | path: modin/core/storage_formats/base/query_compiler.py | n_words: 4 | n_identifiers: 6
def is_monotonic_decreasing(self): return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)
REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514) Co-authored-by: Rehan Sohail Durrani <[email protected]> Signed-off-by: jeffreykennethli <[email protected]>
fun_name: is_monotonic_decreasing | commit_id: 57e29bc5d82348006c5170ef9ac0a9eedcd9acf9 | repo: modin | file_name: query_compiler.py | ast_levels: 10 | nloc: 2
url: https://github.com/modin-project/modin.git | complexity: 1 | token_counts: 20 | n_ast_errors: 0 | vocab_size: 4 | n_ast_nodes: 35 | language: Python
{ "docstring": "\n Return boolean if values in the object are monotonically decreasing.\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 49, "n_words": 13, "vocab_size": 13 }
def is_monotonic_decreasing(self): return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)
d_id: 19,281 | id: 96,147 | n_whitespaces: 119 | path: src/sentry/buffer/redis.py | n_words: 41 | n_identifiers: 19
def get(self, model, columns, filters): key = self._make_key(model, filters) conn = self.cluster.get_local_client_for_key(key) pipe = conn.pipeline() for col in columns:
fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624) In `post_process_group` we process issue alert rules and also ignored groups. Both of these can have conditions that read from the `times_seen` value on the `Group`. The problem here is that updates to `times_seen` are buffered and only written every 45s or so. This means that most of the time when a `Group` goes through `post_process_group` it has an out of date `times_seen` value. For infrequently updated groups, this can just mean that the count is -1. But for high volume groups this could mean that we're considerably below the count. To improve this, we read the current value from buffers and store it as pending updates on the group. We then use this pending value when checking rules and snoozes in post process. There's a potential race condition here where we fetch the `Group`, and before we fetch the value from buffers it is cleared, and so we miss out on the update. This should be infrequent enough that it's not a problem, and either way we will be considerably more accurate most of the time.
fun_name: get | commit_id: 09726d7fc95e53bb516e328fc1811fc9a0704cac | repo: sentry | file_name: redis.py | ast_levels: 12 | nloc: 10
url: https://github.com/getsentry/sentry.git | complexity: 4 | token_counts: 93 | n_ast_errors: 0 | vocab_size: 35 | n_ast_nodes: 146 | language: Python
{ "docstring": "\n Fetches buffered values for a model/filter. Passed columns must be integer columns.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
def get(self, model, columns, filters): key = self._make_key(model, filters) conn = self.cluster.get_local_client_for_key(key) pipe = conn.pipeline() for col in columns: pipe.hget(key, f"i+{col}") results = pipe.execute() return { col: (int(results[i]) if results[i] is not None else 0) for i, col in enumerate(columns) }
d_id: 17,671 | id: 83,385 | n_whitespaces: 52 | path: zerver/tests/test_subs.py | n_words: 13 | n_identifiers: 8
def test_non_ascii_subscription_for_principal(self) -> None: iago = self.example_user("iago") self.assert_adding_subscriptions_for_principal( iago.id, get_realm("zulip"), ["hümbüǵ"], policy_name="Public"
stream_settings: Show stream privacy & description in stream events. Provide stream privacy and description in stream notification events when stream is created. In function "send_messages_for_new_subscribers" for when stream is created, put policy name and description of the stream. Fixes #21004
fun_name: test_non_ascii_subscription_for_principal | commit_id: 4b9770e270823b7ed2bbbeda0e4450f0ba6a288b | repo: zulip | file_name: test_subs.py | ast_levels: 10 | nloc: 9
url: https://github.com/zulip/zulip.git | complexity: 1 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 13 | n_ast_nodes: 67 | language: Python
{ "docstring": "\n You can subscribe other people to streams even if they containing\n non-ASCII characters.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
def test_non_ascii_subscription_for_principal(self) -> None: iago = self.example_user("iago") self.assert_adding_subscriptions_for_principal( iago.id, get_realm("zulip"), ["hümbüǵ"], policy_name="Public" )
d_id: 7,546 | id: 42,453 | n_whitespaces: 210 | path: nltk/corpus/reader/wordnet.py | n_words: 54 | n_identifiers: 16
def add_provs(self, reader): fileids = reader.fileids() for fileid in fileids: prov, langfile = os.path.split(fileid) file_name, file_extension = os.path.splitext(langfile) if file_extension == ".tab": lang = file_name.split("-")[-1] if lang in self.provenance
Initialize empty provenance for default English
fun_name: add_provs | commit_id: 8ffd0d8190552d45f8b92e18da3fc41639e5185d | repo: nltk | file_name: wordnet.py | ast_levels: 14 | nloc: 10
url: https://github.com/nltk/nltk.git | complexity: 4 | token_counts: 84 | n_ast_errors: 0 | vocab_size: 41 | n_ast_nodes: 150 | language: Python
{ "docstring": "Add languages from Multilingual Wordnet to the provenance dictionary", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def add_provs(self, reader): fileids = reader.fileids() for fileid in fileids: prov, langfile = os.path.split(fileid) file_name, file_extension = os.path.splitext(langfile) if file_extension == ".tab": lang = file_name.split("-")[-1] if lang in self.provenances.keys(): # We already have another resource for this lang, # so we need to further specify the lang id: lang = f"{lang}_{prov}" self.provenances[lang] = prov
d_id: 72,127 | id: 248,149 | n_whitespaces: 72 | path: tests/rest/client/test_relations.py | n_words: 19 | n_identifiers: 10
def test_thread_with_bundled_aggregations_for_latest(self) -> None: self._send_relation(Rel
Include bundled aggregations for the latest event in a thread. (#12273) The `latest_event` field of the bundled aggregations for `m.thread` relations did not include bundled aggregations itself. This resulted in clients needing to immediately request the event from the server (and thus making it useless that the latest event itself was serialized instead of just including an event ID).
fun_name: test_thread_with_bundled_aggregations_for_latest | commit_id: 75dff3dc980974960f55fa21fc8e672201f63045 | repo: synapse | file_name: test_relations.py | ast_levels: 9 | nloc: 12
url: https://github.com/matrix-org/synapse.git | complexity: 1 | token_counts: 68 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 93 | language: Python
{ "docstring": "\n Bundled aggregations should get applied to the latest thread event.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def test_thread_with_bundled_aggregations_for_latest(self) -> None: self._send_relation(RelationTypes.THREAD, "m.room.test") channel = self._send_relation(RelationTypes.THREAD, "m.room.test") thread_2 = channel.json_body["event_id"] self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2 )
d_id: 11,592 | id: 56,933 | n_whitespaces: 18 | path: src/prefect/blocks/kubernetes.py | n_words: 4 | n_identifiers: 5
def from_environment(cls): return cls.from_file(path=KUBE_CONFI
organizational changes for the KubernetesClusterConfig and add from_environment classmethod
fun_name: from_environment | commit_id: 574d10ff7612661b37801c811862f18998521d58 | repo: prefect | file_name: kubernetes.py | ast_levels: 8 | nloc: 2
url: https://github.com/PrefectHQ/prefect.git | complexity: 1 | token_counts: 15 | n_ast_errors: 0 | vocab_size: 4 | n_ast_nodes: 27 | language: Python
{ "docstring": "\n Factory method to produce an instance of this class using the default kube config location\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 15 }
def from_environment(cls): return cls.from_file(path=KUBE_CONFIG_DEFAULT_LOCATION)
d_id: 71,705 | id: 247,511 | n_whitespaces: 586 | path: tests/rest/media/v1/test_media_storage.py | n_words: 112 | n_identifiers: 33
def test_thumbnail_repeated_thumbnail(self) -> None: self._test_thumbnail( "scale", self.test_image.expected_scaled, self.test_image.expected_found ) if not self.test_image.expected_found: return # Fetching again should work, without re-requesting the image from the # remote. params = "?width=32&height=32&method=scale" channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), "GET", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result["body"], self.test_image.expected_scaled, channel.result["body"], ) # Deleting the thumbnail on disk then re-requesting it should work as # Synapse should regenerate missing thumbnails. origin, media_id = self.media_id.split("/") info = self.get_success(self.store.get_cached_remote_media(origin, media_id)) file_id = info["filesystem_id"] thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( origin, file_id ) shutil.rmtree(thumbnail_dir, ignore_errors=True) ch
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <[email protected]>
fun_name: test_thumbnail_repeated_thumbnail | commit_id: 32c828d0f760492711a98b11376e229d795fd1b3 | repo: synapse | file_name: test_media_storage.py | ast_levels: 11 | nloc: 49
url: https://github.com/matrix-org/synapse.git | complexity: 4 | token_counts: 263 | n_ast_errors: 0 | vocab_size: 68 | n_ast_nodes: 414 | language: Python
{ "docstring": "Test that fetching the same thumbnail works, and deleting the on disk\n thumbnail regenerates it.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 13 }
def test_thumbnail_repeated_thumbnail(self) -> None: self._test_thumbnail( "scale", self.test_image.expected_scaled, self.test_image.expected_found ) if not self.test_image.expected_found: return # Fetching again should work, without re-requesting the image from the # remote. params = "?width=32&height=32&method=scale" channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), "GET", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result["body"], self.test_image.expected_scaled, channel.result["body"], ) # Deleting the thumbnail on disk then re-requesting it should work as # Synapse should regenerate missing thumbnails. origin, media_id = self.media_id.split("/") info = self.get_success(self.store.get_cached_remote_media(origin, media_id)) file_id = info["filesystem_id"] thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( origin, file_id ) shutil.rmtree(thumbnail_dir, ignore_errors=True) channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), "GET", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result["body"], self.test_image.expected_scaled, channel.result["body"], )
d_id: 105,431 | id: 306,647 | n_whitespaces: 124 | path: homeassistant/components/wake_on_lan/switch.py | n_words: 23 | n_identifiers: 14
def update(self) -> None: ping_cmd = [ "ping", "-c", "1", "-W", str(DEFAULT_PING_TIMEOUT), str(self._host), ] status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL) self._state = not bool(status)
Improve entity type hints [w] (#77886)
fun_name: update | commit_id: a6b6949793e2571bf46cdca2e541ddf64cb1fc71 | repo: core | file_name: switch.py | ast_levels: 10 | nloc: 12
url: https://github.com/home-assistant/core.git | complexity: 1 | token_counts: 61 | n_ast_errors: 0 | vocab_size: 21 | n_ast_nodes: 100 | language: Python
{ "docstring": "Check if device is on and update the state. Only called if assumed state is false.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 14 }
def update(self) -> None: ping_cmd = [ "ping", "-c", "1", "-W", str(DEFAULT_PING_TIMEOUT), str(self._host), ] status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL) self._state = not bool(status)
d_id: 85,861 | id: 286,538 | n_whitespaces: 299 | path: openbb_terminal/portfolio/portfolio_model.py | n_words: 33 | n_identifiers: 13
def get_transactions(self): df = self.__transactions[ [ "Date", "Type", "Ticker", "Side", "Price", "Quantity", "Fees", "Investment", "Currency", "Sector", "Industry", "Country", "Region",
Incorporate portfolio class into SDK (#3401) * create functions to interact with portfolio * fix some docstrings * view docstrings * make portfolio loading available in sdk * reorder some methods * fix bug * update controller * update website * remove import * change input name * regenerate website * change portfolio arg name * fix metrics bugs * fix report * refactor assets alloc * refactor assets sectors alloc * remove unecessary attributes * refactor allocaasset sector * reorganize class * first refactor alloc * refactor portfolio alloc * black * fix alloc bug * regenerate sdk website * fix alloc bugs * forgot this exception * some refactor on portfolio alloc country region * fix some allocation bugs * add examples * regenerate website Co-authored-by: James Maslek <[email protected]>
fun_name: get_transactions | commit_id: 8e9e6bd57f4bc5d57ccedfacccda6342d5881266 | repo: OpenBBTerminal | file_name: portfolio_model.py | ast_levels: 11 | nloc: 22
url: https://github.com/OpenBB-finance/OpenBBTerminal.git | complexity: 1 | token_counts: 87 | n_ast_errors: 0 | vocab_size: 28 | n_ast_nodes: 157 | language: Python
{ "docstring": "Get formatted transactions\n\n Returns\n -------\n pd.DataFrame: formatted transactions\n ", "language": "en", "n_whitespaces": 40, "n_words": 8, "vocab_size": 6 }
def get_transactions(self): df = self.__transactions[ [ "Date", "Type", "Ticker", "Side", "Price", "Quantity", "Fees", "Investment", "Currency", "Sector", "Industry", "Country", "Region", ] ] df = df.replace(np.nan, "-") df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") df.sort_values(by="Date", ascending=False, inplace=True) return df
d_id: 17,589 | id: 83,055 | n_whitespaces: 832 | path: zerver/tests/test_subs.py | n_words: 157 | n_identifiers: 16
def test_pick_colors(self) -> None: used_colors: Set[str] = set() color_map: Dict[int, str] = {} recipient_ids = list(range(30)) user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, { 0: "#76ce90", 1: "#fae589", 2: "#a6c7e5", 3: "#e79ab5", 4: "#bfd56f", 5: "#f4ae55", 6: "#b0a5fd", 7: "#addfe5", 8: "#f5ce6e", 9: "#c2726a", 10: "#94c849", 11: "#bd86e5", 12: "#ee7e4a", 13: "#a6dcbf", 14: "#95a5fd", 15: "#53a063", 16: "#9987e1", 17: "#e4523d", 18: "#c2c2c2", 19: "#4f8de4", 20: "#c6a8ad", 21: "#e7cc4d", 22: "#c8bebf", 23: "#a47462", # start repeating 24: "#76ce90", 25: "#fae589", 26: "#a6c7e5", 27: "#e79ab5",
stream colors: Try harder to avoid collisions. We now use recipient_id % 24 for new stream colors when users have already used all 24 of our canned colors. This fix doesn't address the scenario that somebody dislikes one of our current canned colors, so if a user continually changes canned color N to some other color for new streams, their new streams will continue to include color N (and the user will still need to change them). This fix doesn't address the fact that it can be expensive during bulk-add situations to query for all the colors that users have already used up. See https://chat.zulip.org/#narrow/stream/3-backend/topic/assigning.20stream.20colors for more discussion.
fun_name: test_pick_colors | commit_id: dd1c9c45c778dc5280c2b02c3b9fb327d2507cc1 | repo: zulip | file_name: test_subs.py | ast_levels: 10 | nloc: 70
url: https://github.com/zulip/zulip.git | complexity: 1 | token_counts: 315 | n_ast_errors: 0 | vocab_size: 106 | n_ast_nodes: 520 | language: Python
{ "docstring": "\n If we are assigning colors to a user with 24+ streams, we have to start\n re-using old colors. Our algorithm basically uses recipient_id % 24, so\n the following code reflects the worse case scenario that our new\n streams have recipient ids spaced out by exact multiples of 24. We\n don't try to work around this edge case, since users who really depend\n on the stream colors can always just assign themselves custom colors\n for the streams that they really want to stand out.\n\n Even if recipient_ids were completely random, the odds of collisions\n are low, but it's often the case that bulk-adds are done for streams\n that either were or are being created at roughly the same time, so the\n recipient_ids tend to have even fewer collisions.\n ", "language": "en", "n_whitespaces": 214, "n_words": 127, "vocab_size": 96 }
def test_pick_colors(self) -> None: used_colors: Set[str] = set() color_map: Dict[int, str] = {} recipient_ids = list(range(30)) user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, { 0: "#76ce90", 1: "#fae589", 2: "#a6c7e5", 3: "#e79ab5", 4: "#bfd56f", 5: "#f4ae55", 6: "#b0a5fd", 7: "#addfe5", 8: "#f5ce6e", 9: "#c2726a", 10: "#94c849", 11: "#bd86e5", 12: "#ee7e4a", 13: "#a6dcbf", 14: "#95a5fd", 15: "#53a063", 16: "#9987e1", 17: "#e4523d", 18: "#c2c2c2", 19: "#4f8de4", 20: "#c6a8ad", 21: "#e7cc4d", 22: "#c8bebf", 23: "#a47462", # start repeating 24: "#76ce90", 25: "#fae589", 26: "#a6c7e5", 27: "#e79ab5", 28: "#bfd56f", 29: "#f4ae55", }, ) color_map = {98: "color98", 99: "color99"} used_colors = set(STREAM_ASSIGNMENT_COLORS) - {"#c6a8ad", "#9987e1"} recipient_ids = [99, 98, 1, 2, 3, 4] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {98: "color98", 99: "color99", 1: "#9987e1", 2: "#c6a8ad", 3: "#e79ab5", 4: "#bfd56f"}, ) used_colors = set(STREAM_ASSIGNMENT_COLORS) color_map = {} recipient_ids = [2, 26, 50, 74] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {2: "#a6c7e5", 26: "#a6c7e5", 50: "#a6c7e5", 74: "#a6c7e5"}, )
d_id: 77,645 | id: 264,221 | n_whitespaces: 449 | path: netbox/extras/tests/test_customfields.py | n_words: 167 | n_identifiers: 25
def test_import(self): data = ( ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'), ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{"foo": 123}', 'Choice A', '"Choice A,Choice B"'), ('Site 2', 'site-2', 'active', 'DEF', 'Bar', '456', 'False', '2020-01-02', 'http://example.com/2', '{"bar": 456}', 'Choice B', '"Choice B,Choice C"'), ('Site 3', 'site-3', 'active', '', '', '', '', '', '', '', '', ''), ) csv_data = '\n'.join(','.join(row) for row in data) response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data}) self.assertEqual(response.status_code, 200) self.assertEqual(Site.objects.count(), 3) # Validate data for site 1 site1 = Site.objects.get(name='Site 1') self.assertEqual(len(site1.custom_field_data), 9) self.assertEqual(site1.custom_field_data['text'], 'ABC') self.assertEqual(site1.custom_field_data['longtext'], 'Foo') self.assertEqual(site1.custom_field_data['integer'], 123) self.assertEqual(site1.custom_field_data['boolean'], True) self.assertEqual(site1.custom_field_data['date'], '2020-01-01') self.assertEqual(site1.custom_field_data['url'], 'http://example.com/1') self.assertEqual(site1.custom_field_data['json'], {"foo": 123}) self.assertEqual(site1.custom_field_data['select'], 'Choice A') self.assertEqual(site1.custom_field_data['multiselect'], ['Choice A', 'Choice B']) # Validate data for site 2 site2 = Site
Fixes #8317: Fix CSV import of multi-select custom field values
fun_name: test_import | commit_id: 7421e5f7d7e579ed1a0acf840c39ae61fd851504 | repo: netbox | file_name: test_customfields.py | ast_levels: 12 | nloc: 35
url: https://github.com/netbox-community/netbox.git | complexity: 2 | token_counts: 501 | n_ast_errors: 0 | vocab_size: 128 | n_ast_nodes: 888 | language: Python
{ "docstring": "\n Import a Site in CSV format, including a value for each CustomField.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
def test_import(self): data = ( ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'), ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{"foo": 123}', 'Choice A', '"Choice A,Choice B"'), ('Site 2', 'site-2', 'active', 'DEF', 'Bar', '456', 'False', '2020-01-02', 'http://example.com/2', '{"bar": 456}', 'Choice B', '"Choice B,Choice C"'), ('Site 3', 'site-3', 'active', '', '', '', '', '', '', '', '', ''), ) csv_data = '\n'.join(','.join(row) for row in data) response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data}) self.assertEqual(response.status_code, 200) self.assertEqual(Site.objects.count(), 3) # Validate data for site 1 site1 = Site.objects.get(name='Site 1') self.assertEqual(len(site1.custom_field_data), 9) self.assertEqual(site1.custom_field_data['text'], 'ABC') self.assertEqual(site1.custom_field_data['longtext'], 'Foo') self.assertEqual(site1.custom_field_data['integer'], 123) self.assertEqual(site1.custom_field_data['boolean'], True) self.assertEqual(site1.custom_field_data['date'], '2020-01-01') self.assertEqual(site1.custom_field_data['url'], 'http://example.com/1') self.assertEqual(site1.custom_field_data['json'], {"foo": 123}) self.assertEqual(site1.custom_field_data['select'], 'Choice A') self.assertEqual(site1.custom_field_data['multiselect'], ['Choice A', 'Choice B']) # Validate data for site 2 site2 = Site.objects.get(name='Site 2') self.assertEqual(len(site2.custom_field_data), 9) self.assertEqual(site2.custom_field_data['text'], 'DEF') self.assertEqual(site2.custom_field_data['longtext'], 'Bar') self.assertEqual(site2.custom_field_data['integer'], 456) self.assertEqual(site2.custom_field_data['boolean'], False) self.assertEqual(site2.custom_field_data['date'], '2020-01-02') self.assertEqual(site2.custom_field_data['url'], 'http://example.com/2') self.assertEqual(site2.custom_field_data['json'], {"bar": 456}) self.assertEqual(site2.custom_field_data['select'], 'Choice B') self.assertEqual(site2.custom_field_data['multiselect'], ['Choice B', 'Choice C']) # No custom field data should be set for site 3 site3 = Site.objects.get(name='Site 3') self.assertFalse(any(site3.custom_field_data.values()))
d_id: 55,450 | id: 218,720 | n_whitespaces: 122 | path: python3.10.4/Lib/lib2to3/fixes/fix_renames.py | n_words: 37 | n_identifiers: 9
def build_pattern(): #bare = set() for module, replace in list(MAPPING.items()): for old_attr, new_attr in list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield
add python 3.10.4 for windows
fun_name: build_pattern | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: fix_renames.py | ast_levels: 12 | nloc: 11
url: https://github.com/XX-net/XX-Net.git | complexity: 3 | token_counts: 60 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 104 | language: Python
{ "docstring": "\n # import_name< 'import' (module=%r\n # | dotted_as_names< any* module=%r any* >) >\n # \n import_from< 'from' module_name=%r 'import'\n ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >\n \n power< module_name=%r trailer< '.' attr_name=%r > any* >\n bare_name=%s", "language": "en", "n_whitespaces": 178, "n_words": 35, "vocab_size": 22 }
def build_pattern(): #bare = set() for module, replace in list(MAPPING.items()): for old_attr, new_attr in list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield % (module, module) yield % (module, old_attr, old_attr) yield % (module, old_attr) #yield % alternates(bare)
d_id: 104,662 | id: 305,878 | n_whitespaces: 195 | path: homeassistant/components/plex/sensor.py | n_words: 42 | n_identifiers: 18
async def async_refresh_sensor(self) -> None: _LOGGER.debug("Refreshing library sensor for '%s'", self.name) try: await self.hass.async_add_executor_job(self._update_state_and_attrs) self._attr_available = True except NotFound: self._attr_available = False except requests.exceptions.RequestException as err: _LOGGER.error( "Could not update library sensor for '%s': %s", sel
Improve entity type hints [p] (#77871)
fun_name: async_refresh_sensor | commit_id: 474844744bdd2b0dcba46b82d9d3fcd8e3dbad24 | repo: core | file_name: sensor.py | ast_levels: 12 | nloc: 16
url: https://github.com/home-assistant/core.git | complexity: 3 | token_counts: 78 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 132 | language: Python
{ "docstring": "Update state and attributes for the library sensor.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
async def async_refresh_sensor(self) -> None: _LOGGER.debug("Refreshing library sensor for '%s'", self.name) try: await self.hass.async_add_executor_job(self._update_state_and_attrs) self._attr_available = True except NotFound: self._attr_available = False except requests.exceptions.RequestException as err: _LOGGER.error( "Could not update library sensor for '%s': %s", self.library_section.title, err, ) self._attr_available = False self.async_write_ha_state()
d_id: 12,765 | id: 61,941 | n_whitespaces: 22 | path: .venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py | n_words: 8 | n_identifiers: 6
def __hash__(self): return hash(self
upd; format
fun_name: __hash__ | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: database.py | ast_levels: 10 | nloc: 2
url: https://github.com/jindongwang/transferlearning.git | complexity: 1 | token_counts: 27 | n_ast_errors: 0 | vocab_size: 7 | n_ast_nodes: 46 | language: Python
{ "docstring": "\n Compute hash in a way which matches the equality test.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def __hash__(self): return hash(self.name) + hash(self.version) + hash(self.source_url)
d_id: 20,199 | id: 100,746 | n_whitespaces: 68 | path: plugins/train/model/phaze_a.py | n_words: 22 | n_identifiers: 7
def _min_nodes(self) -> int: if self._side == "gblock":
Model updates - Increase model summary width - Phaze A updates - Update some min/max values - Add Decoder Filter Slope Mode - Add additional arguments for Upsampling2D - Adjust upsampling method for multiple upsamples in FC layers - Typing
fun_name: _min_nodes | commit_id: a99049711f289b435e710d5b15f9c0e45c4251c3 | repo: faceswap | file_name: phaze_a.py | ast_levels: 12 | nloc: 9
url: https://github.com/deepfakes/faceswap.git | complexity: 2 | token_counts: 52 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 91 | language: Python
{ "docstring": " int: The number of nodes for the first Dense. For non g-block layers this will be the\n given minimum filters multiplied by the dimensions squared. For g-block layers, this is the\n given value ", "language": "en", "n_whitespaces": 48, "n_words": 33, "vocab_size": 26 }
def _min_nodes(self) -> int: if self._side == "gblock": return self._config["fc_gblock_min_nodes"] retval = self._scale_filters(self._config["fc_min_filters"]) retval = int(retval * self._config["fc_dimensions"] ** 2) return retval
d_id: 36,242 | id: 155,114 | n_whitespaces: 75 | path: modin/config/envvars.py | n_words: 22 | n_identifiers: 14
def _get(cls) -> dict: custom_parameters = super().get() result = cls.default.copy() result.update( {key.replace("-", "_"): value for key, value in custom_parameters.items()}
FIX-#5187: Fixed RecursionError in OmnisciLaunchParameters.get() (#5199) Signed-off-by: Andrey Pavlenko <[email protected]>
fun_name: _get | commit_id: c51ab405efec920dbb4baa2e2389409df04e8d43 | repo: modin | file_name: envvars.py | ast_levels: 12 | nloc: 15
url: https://github.com/modin-project/modin.git | complexity: 2 | token_counts: 55 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 95 | language: Python
{ "docstring": "\n Get the resulted command-line options.\n\n Returns\n -------\n dict\n Decoded and verified config value.\n ", "language": "en", "n_whitespaces": 60, "n_words": 13, "vocab_size": 13 }
def _get(cls) -> dict: custom_parameters = super().get() result = cls.default.copy() result.update( {key.replace("-", "_"): value for key, value in custom_parameters.items()} ) return result
d_id: 54,846 | id: 217,597 | n_whitespaces: 129 | path: python3.10.4/Lib/graphlib.py | n_words: 47 | n_identifiers: 14
def add(self, node, *predecessors):
add python 3.10.4 for windows
fun_name: add | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: graphlib.py | ast_levels: 10 | nloc: 8
url: https://github.com/XX-net/XX-Net.git | complexity: 3 | token_counts: 61 | n_ast_errors: 0 | vocab_size: 39 | n_ast_nodes: 102 | language: Python
{ "docstring": "Add a new node and its predecessors to the graph.\n\n Both the *node* and all elements in *predecessors* must be hashable.\n\n If called multiple times with the same node argument, the set of dependencies\n will be the union of all dependencies passed in.\n\n It is possible to add a node with no dependencies (*predecessors* is not provided)\n as well as provide a dependency twice. If a node that has not been provided before\n is included among *predecessors* it will be automatically added to the graph with\n no predecessors of its own.\n\n Raises ValueError if called after \"prepare\".\n ", "language": "en", "n_whitespaces": 160, "n_words": 97, "vocab_size": 63 }
def add(self, node, *predecessors): if self._ready_nodes is not None: raise ValueError("Nodes cannot be added after a call to prepare()") # Create the node -> predecessor edges nodeinfo = self._get_nodeinfo(node) nodeinfo.npredecessors += len(predecessors) # Create the predecessor -> node edges for pred in predecessors: pred_info = self._get_nodeinfo(pred) pred_info.successors.append(node)
d_id: 577 | id: 3,840 | n_whitespaces: 214 | path: airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py | n_words: 87 | n_identifiers: 32
def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date): end_date = start_date + duration(days=10) cursor_value = start_date + duration(days=5) state = { AdsInsights.cursor_field: cursor_value.date().isoformat(), "slices": [(cursor_value + duration(days=1)).date().isoformat(), (cursor_value + duration(days=3)).date().isoformat()], } stream = AdsInsights(api=api, start_date=start_date, end_date=end_date) async_manager_mock.completed_jobs.return_value = [1, 2, 3] slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental)) assert slices == [{"insight_job": 1}, {"insight_job": 2}, {"insight_job
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
fun_name: test_stream_slices_with_state_and_slices | commit_id: a3aae8017a0a40ff2006e2567f71dccb04c997a5 | repo: airbyte | file_name: test_base_insight_streams.py | ast_levels: 18 | nloc: 17
url: https://github.com/airbytehq/airbyte.git | complexity: 1 | token_counts: 244 | n_ast_errors: 0 | vocab_size: 62 | n_ast_nodes: 386 | language: Python
{ "docstring": "Stream will use cursor_value from state, but will skip saved slices", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date): end_date = start_date + duration(days=10) cursor_value = start_date + duration(days=5) state = { AdsInsights.cursor_field: cursor_value.date().isoformat(), "slices": [(cursor_value + duration(days=1)).date().isoformat(), (cursor_value + duration(days=3)).date().isoformat()], } stream = AdsInsights(api=api, start_date=start_date, end_date=end_date) async_manager_mock.completed_jobs.return_value = [1, 2, 3] slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental)) assert slices == [{"insight_job": 1}, {"insight_job": 2}, {"insight_job": 3}] async_manager_mock.assert_called_once() args, kwargs = async_manager_mock.call_args generated_jobs = list(kwargs["jobs"]) assert len(generated_jobs) == (end_date - cursor_value).days - 2, "should be 2 slices short because of state" assert generated_jobs[0].interval.start == cursor_value.date() + duration(days=2) assert generated_jobs[1].interval.start == cursor_value.date() + duration(days=4)
d_id: 52,095 | id: 207,776 | n_whitespaces: 434 | path: tests/admin_views/tests.py | n_words: 92 | n_identifiers: 30
def _test_readonly_foreignkey_links(self, admin_site): chapter = Chapter.objects.create( title="Chapter 1", content="content", book=Book.objects.create(name="Book 1"), ) language = Language.objects.create(iso="_40", name="Test") obj = ReadOnlyRelatedField.objects.create( chapter=chapter, language=language, user=self.superuser, ) response = self.client.get( reverse( f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,) ), ) # Related ForeignKey object registered in admin. user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,)) self.assertContains( response, '<div class="readonly"><a href="%s">super</a></div>' % user_url, html=True, ) # Related ForeignKey with the string primary key registered in admin. language_url = reverse( f"{admin_site}:admin_views_language_change", args=(quote(language.pk),), ) self.assertContains( response, '<div class="readonly"><a href="%s">_40</a></div>' % language_url, html=True, ) # Related Forei
Refs #33476 -- Reformatted code with Black.
fun_name: _test_readonly_foreignkey_links | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: tests.py | ast_levels: 13 | nloc: 35
url: https://github.com/django/django.git | complexity: 1 | token_counts: 181 | n_ast_errors: 0 | vocab_size: 58 | n_ast_nodes: 299 | language: Python
{ "docstring": "\n ForeignKey readonly fields render as links if the target model is\n registered in admin.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
def _test_readonly_foreignkey_links(self, admin_site): chapter = Chapter.objects.create( title="Chapter 1", content="content", book=Book.objects.create(name="Book 1"), ) language = Language.objects.create(iso="_40", name="Test") obj = ReadOnlyRelatedField.objects.create( chapter=chapter, language=language, user=self.superuser, ) response = self.client.get( reverse( f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,) ), ) # Related ForeignKey object registered in admin. user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,)) self.assertContains( response, '<div class="readonly"><a href="%s">super</a></div>' % user_url, html=True, ) # Related ForeignKey with the string primary key registered in admin. language_url = reverse( f"{admin_site}:admin_views_language_change", args=(quote(language.pk),), ) self.assertContains( response, '<div class="readonly"><a href="%s">_40</a></div>' % language_url, html=True, ) # Related ForeignKey object not registered in admin. self.assertContains( response, '<div class="readonly">Chapter 1</div>', html=True )
d_id: 14,247 | id: 66,616 | n_whitespaces: 38 | path: erpnext/patches/v12_0/fix_percent_complete_for_projects.py | n_words: 51 | n_identifiers: 16
def execute(): for project in frappe.get_all("Project", fields=["name", "percent_complete_method"]): total = frappe.db.count("Task", dict(project=project.name)) if project.percent_complete_method == "Task Completion" and total > 0: completed = frappe.db.sql( , project.name, )[0][0] per
style: format code with black
fun_name: execute | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: fix_percent_complete_for_projects.py | ast_levels: 16 | nloc: 14
url: https://github.com/frappe/erpnext.git | complexity: 6 | token_counts: 132 | n_ast_errors: 0 | vocab_size: 40 | n_ast_nodes: 217 | language: Python
{ "docstring": "select count(name) from tabTask where\n\t\t\t\t\tproject=%s and status in ('Cancelled', 'Completed')", "language": "en", "n_whitespaces": 9, "n_words": 11, "vocab_size": 11 }
def execute(): for project in frappe.get_all("Project", fields=["name", "percent_complete_method"]): total = frappe.db.count("Task", dict(project=project.name)) if project.percent_complete_method == "Task Completion" and total > 0: completed = frappe.db.sql( , project.name, )[0][0] percent_complete = flt(flt(completed) / total * 100, 2) if project.percent_complete != percent_complete: frappe.db.set_value("Project", project.name, "percent_complete", percent_complete) if percent_complete == 100: frappe.db.set_value("Project", project.name, "status", "Completed")
d_id: 48,268 | id: 196,972 | n_whitespaces: 114 | path: sympy/parsing/mathematica.py | n_words: 37 | n_identifiers: 14
def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: SymPyDeprecationWarning( feature="additional_translations parameter for the Mathematica parser", last_supported_version="1.9", useinstead="Use SymPy's .replace( ) or .subs( ) methods on the output express
Support parsing functions and some more Mathematica nodes. Commented Mathematica code is now parsed correctly.
fun_name: mathematica | commit_id: 35a158ece2bec4d77d78a193fcafa4dd5fd5f691 | repo: sympy | file_name: mathematica.py | ast_levels: 13 | nloc: 11
url: https://github.com/sympy/sympy.git | complexity: 2 | token_counts: 62 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 105 | language: Python
{ "docstring": "\n Translate a string containing a Wolfram Mathematica expression to a SymPy\n expression.\n\n If the translator is unable to find a suitable SymPy expression, the\n ``FullForm`` of the Mathematica expression will be output, using SymPy\n ``Function`` objects as nodes of the syntax tree.\n\n Examples\n ========\n\n >>> from sympy.parsing.mathematica import mathematica\n >>> mathematica(\"Sin[x]^2 Tan[y]\")\n sin(x)**2*tan(y)\n >>> e = mathematica(\"F[7,5,3]\")\n >>> e\n F(7, 5, 3)\n >>> from sympy import Function, Max, Min\n >>> e.replace(Function(\"F\"), lambda *x: Max(*x)*Min(*x))\n 21\n\n Both standard input form and Mathematica full form are supported:\n\n >>> mathematica(\"x*(a + b)\")\n x*(a + b)\n >>> mathematica(\"Times[x, Plus[a, b]]\")\n x*(a + b)\n\n To get a matrix from Wolfram's code:\n\n >>> m = mathematica(\"{{a, b}, {c, d}}\")\n >>> m\n ((a, b), (c, d))\n >>> from sympy import Matrix\n >>> Matrix(m)\n Matrix([\n [a, b],\n [c, d]])\n\n If the translation into equivalent SymPy expressions fails, an SymPy\n expression equivalent to Wolfram Mathematica's \"FullForm\" will be created:\n\n >>> mathematica(\"x_.\")\n Optional(Pattern(x, Blank()))\n >>> mathematica(\"Plus @@ {x, y, z}\")\n Apply(Plus, (x, y, z))\n >>> mathematica(\"f[x_, 3] := x^3 /; x > 0\")\n SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0))\n ", "language": "en", "n_whitespaces": 298, "n_words": 180, "vocab_size": 125 }
def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: SymPyDeprecationWarning( feature="additional_translations parameter for the Mathematica parser", last_supported_version="1.9", useinstead="Use SymPy's .replace( ) or .subs( ) methods on the output expression", issue="23042", ).warn() return sympify(parser._parse_old(s)) return parser.parse(s)
d_id: 14,535 | id: 67,468 | n_whitespaces: 18 | path: erpnext/setup/doctype/company/company.py | n_words: 28 | n_identifiers: 15
def update_company_current_month_sales(company): current_month_year = formatdate(today(), "MM-yyyy") results = frappe.db.sql( .format( current_month_year=current_month_year, company=frappe.db.escape(company) ), as_dict=True, ) monthly_total = results[0]["total"] if
style: format code with black
fun_name: update_company_current_month_sales | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: company.py | ast_levels: 14 | nloc: 22
url: https://github.com/frappe/erpnext.git | complexity: 2 | token_counts: 80 | n_ast_errors: 0 | vocab_size: 25 | n_ast_nodes: 129 | language: Python
{ "docstring": "\n\t\tSELECT\n\t\t\tSUM(base_grand_total) AS total,\n\t\t\tDATE_FORMAT(`posting_date`, '%m-%Y') AS month_year\n\t\tFROM\n\t\t\t`tabSales Invoice`\n\t\tWHERE\n\t\t\tDATE_FORMAT(`posting_date`, '%m-%Y') = '{current_month_year}'\n\t\t\tAND docstatus = 1\n\t\t\tAND company = {company}\n\t\tGROUP BY\n\t\t\tmonth_year\n\t", "language": "en", "n_whitespaces": 16, "n_words": 27, "vocab_size": 20 }
def update_company_current_month_sales(company): current_month_year = formatdate(today(), "MM-yyyy") results = frappe.db.sql( .format( current_month_year=current_month_year, company=frappe.db.escape(company) ), as_dict=True, ) monthly_total = results[0]["total"] if len(results) > 0 else 0 frappe.db.set_value("Company", company, "total_monthly_sales", monthly_total)
d_id: 12,290 | id: 60,778 | n_whitespaces: 30 | path: .venv/lib/python3.8/site-packages/pip/_internal/metadata/base.py | n_words: 9 | n_identifiers: 3
def metadata_version(self): # type: () -> Optional[str] raise NotImplementedErro
upd; format
fun_name: metadata_version | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: base.py | ast_levels: 7 | nloc: 2
url: https://github.com/jindongwang/transferlearning.git | complexity: 1 | token_counts: 10 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 21 | language: Python
{ "docstring": "Value of \"Metadata-Version:\" in the distribution, if available.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def metadata_version(self): # type: () -> Optional[str] raise NotImplementedError()
d_id: 83,683 | id: 281,280 | n_whitespaces: 92 | path: gamestonk_terminal/stocks/options/screener_controller.py | n_words: 23 | n_identifiers: 10
def call_ca(self, _): if self.screen_tickers: self.queue = ca_controller.ComparisonAnalysisController( self.screen_tickers, self.queue ).menu(custom_path_menu_above=
Baseclass (#1141) * A working decorator * Basic intro * Added more * Refactor * Refactor * Cleaned code * Simplified function (thanks Chavi) * Small change * Updating tests : fix issue with mock * Updating tests : fix remaining mocks after merging * Updating tests : black * Cleaned up * Finished base cases * Notes * Slight changes * Added dynamic options handling, error persists * Fixed pylint issues * Fixed mock * fix decorator with dynamic dictionary of args * move choices from dynamic to const in crypto/ov * Updated var names * Check * Moved decorators * Fixed import issues * Fixed tests, update payoff controller * Fixed tests * Fixed pylint * Updated files * Added base class * Added reset * Improved base class * For James * More menues converted * Added contexts * 24 controllers left * 18 Controllers left * Changes choices * 9 controllers left * Added all controllers * Fixed glitch * Replaced all improper callings of class * Removed menu decorator * refactored try_except * Last commit * Black fix * Bug fix * Added James' new menus * Fixed tests * Fixed 8 tests * Fixing mypy issue * Updating tests : stocks/options * Fixed options * Fixed tests * Updating tests : stocks/options * Fixed tests * More test fixes * Updating tests : stocks/ba * Fixed options test * More bug fixes * Fixed tests * fixed pylint * Skipped test_call_load * Add typings to base class * Fix issue with appending auto completer options + bugfixes * Add typings to base class * Terminal throws error for bad path * sexy solution to auto completer in runtime * more sexy reset with reset_level stored * no so sexy jump between indirect menus * Removing choices argument * refactor custom_reset * Fixed tests * Theo fixes * Added back function * Fixed tests Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: DidierRLopes <[email protected]>
fun_name: call_ca | commit_id: 006b3570b795215a17c64841110b649b03db9a98 | repo: OpenBBTerminal | file_name: screener_controller.py | ast_levels: 13 | nloc: 7
url: https://github.com/OpenBB-finance/OpenBBTerminal.git | complexity: 2 | token_counts: 42 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 74 | language: Python
{ "docstring": "Call the comparison analysis menu with selected tickers", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def call_ca(self, _): if self.screen_tickers: self.queue = ca_controller.ComparisonAnalysisController( self.screen_tickers, self.queue ).menu(custom_path_menu_above="/stocks/") else: print("Some tickers must be screened first through one of the presets!\n")
d_id: 42,460 | id: 177,607 | n_whitespaces: 573 | path: label_studio/data_manager/actions/basic.py | n_words: 191 | n_identifiers: 26
def delete_tasks_predictions(project, queryset, **kwargs): task_ids = queryset.values_list('id', flat=True) predictions = Prediction.objects.filter(task__id__in=task_ids) count = predictions.count() predictions.delete() queryset.update(updated_at=datetime.now()) return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' predictions'} actions = [ { 'e
feat: DEV-1205: Add task.updated_at column (#1784) * Update task.updated_at on annotation update (DEV-1205) * Fix set updated_at on annotation delete (DEV-1205) * Set update_at for every dm action (DEV-1205) * Stop changing updated_at on actions (DEV-1205) * Update experimental.py Co-authored-by: Max Tkachenko <[email protected]> Co-authored-by: niklub <[email protected]>
fun_name: delete_tasks_predictions | commit_id: 1c4328c5a8b10ee20ac4328ce30612d106350699 | repo: label-studio | file_name: basic.py | ast_levels: 11 | nloc: 7
url: https://github.com/heartexlabs/label-studio.git | complexity: 1 | token_counts: 76 | n_ast_errors: 0 | vocab_size: 100 | n_ast_nodes: 406 | language: Python
{ "docstring": " Delete all predictions by tasks ids\n\n :param project: project instance\n :param queryset: filtered tasks db queryset\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
def delete_tasks_predictions(project, queryset, **kwargs): task_ids = queryset.values_list('id', flat=True) predictions = Prediction.objects.filter(task__id__in=task_ids) count = predictions.count() predictions.delete() queryset.update(updated_at=datetime.now()) return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' predictions'} actions = [ { 'entry_point': retrieve_tasks_predictions, 'permission': all_permissions.predictions_any, 'title': 'Retrieve Predictions', 'order': 90, 'dialog': { 'text': 'Send the selected tasks to all ML backends connected to the project.' 'This operation might be abruptly interrupted due to a timeout. ' 'The recommended way to get predictions is to update tasks using the Label Studio API.' '<a href="https://labelstud.io/guide/ml.html>See more in the documentation</a>.' 'Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks, 'permission': all_permissions.tasks_delete, 'title': 'Delete Tasks', 'order': 100, 'reload': True, 'dialog': { 'text': 'You are going to delete the selected tasks. Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks_annotations, 'permission': all_permissions.tasks_delete, 'title': 'Delete Annotations', 'order': 101, 'dialog': { 'text': 'You are going to delete all annotations from the selected tasks. Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks_predictions, 'permission': all_permissions.predictions_any, 'title': 'Delete Predictions', 'order': 102, 'dialog': { 'text': 'You are going to delete all predictions from the selected tasks. Please confirm your action.', 'type': 'confirm' } } ]
d_id: 24,962 | id: 113,577 | n_whitespaces: 226 | path: nni/compression/pytorch/base/scheduler.py | n_words: 42 | n_identifiers: 11
def clean_up(self): if not self._cleaned: for ref in self.referenced_paths(): self._reference_counter[ref] -= 1 if self._reference_counter[ref] <= 0: os.remove(ref) if s
[Compression] remove pruning v1 & refactor directory (#5228)
fun_name: clean_up | commit_id: d68c786ff81bad19c04619d6a999ff34aaa724e7 | repo: nni | file_name: scheduler.py | ast_levels: 17 | nloc: 12
url: https://github.com/microsoft/nni.git | complexity: 5 | token_counts: 87 | n_ast_errors: 0 | vocab_size: 36 | n_ast_nodes: 141 | language: Python
{ "docstring": "\n Counter of referenced file paths subtract 1. If the counter reach 0, then delete the file.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
def clean_up(self): if not self._cleaned: for ref in self.referenced_paths(): self._reference_counter[ref] -= 1 if self._reference_counter[ref] <= 0: os.remove(ref) if self._reference_counter[ref] < 0: _logger.warning('Referance counter error, the number of %s is %d', ref, self._reference_counter[ref]) self._cleaned = True else: _logger.warning('Already clean up task %d', self.task_id)
d_id: 51,277 | id: 205,911 | n_whitespaces: 380 | path: django/db/utils.py | n_words: 136 | n_identifiers: 22
def load_backend(backend_name): # This backend was renamed in Django 1.9.
Refs #33476 -- Reformatted code with Black.
fun_name: load_backend | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: utils.py | ast_levels: 17 | nloc: 23
url: https://github.com/django/django.git | complexity: 8 | token_counts: 119 | n_ast_errors: 0 | vocab_size: 100 | n_ast_nodes: 211 | language: Python
{ "docstring": "\n Return a database backend's \"base\" module given a fully qualified database\n backend name, or raise an error if it doesn't exist.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
def load_backend(backend_name): # This backend was renamed in Django 1.9. if backend_name == "django.db.backends.postgresql_psycopg2": backend_name = "django.db.backends.postgresql" try: return import_module("%s.base" % backend_name) except ImportError as e_user: # The database backend wasn't found. Display a helpful error message # listing all built-in database backends. import django.db.backends builtin_backends = [ name for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__) if ispkg and name not in {"base", "dummy"} ] if backend_name not in ["django.db.backends.%s" % b for b in builtin_backends]: backend_reprs = map(repr, sorted(builtin_backends)) raise ImproperlyConfigured( "%r isn't an available database backend or couldn't be " "imported. Check the above exception. To use one of the " "built-in backends, use 'django.db.backends.XXX', where XXX " "is one of:\n" " %s" % (backend_name, ", ".join(backend_reprs)) ) from e_user else: # If there's some other error, this must be an error in Django raise
d_id: 27,260 | id: 122,870 | n_whitespaces: 124 | path: jax/_src/pjit.py | n_words: 69 | n_identifiers: 14
def explode_superdims(sizes, dims): strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))} dims = list(reversed(dims)) final_dims = [] for size, stride in
Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs PiperOrigin-RevId: 495944279
fun_name: explode_superdims | commit_id: 4b587fa1f0049db5366fd04812ab940d80a71a22 | repo: jax | file_name: pjit.py | ast_levels: 12 | nloc: 18
url: https://github.com/google/jax.git | complexity: 4 | token_counts: 118 | n_ast_errors: 0 | vocab_size: 40 | n_ast_nodes: 186 | language: Python
{ "docstring": "Explode superdims to fit a known shape.\n\n The unflattening process might mistakenly generate too few too large dimensions.\n For example, ``unflatten_superdims(np.arange(n))`` always returns ``[(n, 1)]``.\n This function takes a list of such contiguous super-dimensions and splits them\n into smaller dimensions such that::\n\n set(map(fst, explode_superdims(sizes, dims))) == set(sizes)\n ", "language": "en", "n_whitespaces": 55, "n_words": 47, "vocab_size": 44 }
def explode_superdims(sizes, dims): strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))} dims = list(reversed(dims)) final_dims = [] for size, stride in dims: target_size = strides_to_sizes[stride] new_dims = [] while size > target_size: assert target_size > 1 # Ensure progress assert size % target_size == 0 new_dims.append((target_size, stride)) size //= target_size stride *= target_size target_size = strides_to_sizes[stride] assert size == target_size new_dims.append((size, stride)) final_dims += reversed(new_dims) return final_dims
d_id: 41,886 | id: 176,421 | n_whitespaces: 100 | path: networkx/classes/function.py | n_words: 39 | n_identifiers: 17
def path_weight(G, path, weight): multigraph = G.is_multigraph() cost = 0 if not nx.is_path(G, path): raise nx.NetworkXNoPath("path does not exist") for node, nbr in nx.utils.pairwise(path): if multigraph: cost += min(v[weight] for v in G[
Correct typo in docstring (int -> float) (#5398) * Correct typo in docstring (int -> float) This is based on https://stackoverflow.com/q/71494698/10693596 * Update function.py * Update function.py
fun_name: path_weight | commit_id: eb22e121816896ec0664c41a0232e2f80a259b96 | repo: networkx | file_name: function.py | ast_levels: 17 | nloc: 11
url: https://github.com/networkx/networkx.git | complexity: 5 | token_counts: 94 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 148 | language: Python
{ "docstring": "Returns total cost associated with specified path and weight\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n path: list\n A list of node labels which defines the path to traverse\n\n weight: string\n A string indicating which edge attribute to use for path cost\n\n Returns\n -------\n cost: int or float\n An integer or a float representing the total cost with respect to the\n specified weight of the specified path\n\n Raises\n ------\n NetworkXNoPath\n If the specified edge does not exist.\n ", "language": "en", "n_whitespaces": 156, "n_words": 78, "vocab_size": 51 }
def path_weight(G, path, weight): multigraph = G.is_multigraph() cost = 0 if not nx.is_path(G, path): raise nx.NetworkXNoPath("path does not exist") for node, nbr in nx.utils.pairwise(path): if multigraph: cost += min(v[weight] for v in G[node][nbr].values()) else: cost += G[node][nbr][weight] return cost
d_id: 52,477 | id: 208,718 | n_whitespaces: 224 | path: IPython/core/history.py | n_words: 96 | n_identifiers: 18
def _run_sql(self, sql, params, raw=True, output=False, latest=False): toget = 'source_raw' if raw else 'source' sqlfrom = "history" if output: sqlfrom = "history LEFT JOIN output_history USING (session, line)" toget = "history.%s, output_history.output" % toget if latest: toget += ", MAX(session * 128 * 1024 + line)" this_querry = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql cur = self.db.execute(this_querry, params) if latest: cur = (row[:-1] for row in cur) if output: # Regroup into 3-t
This fixed the mixing of multiple history seen in #13631 It forces get_tail to put the current session last in the returned results.
fun_name: _run_sql | commit_id: dc5bcc1c50892a5128fcf128af28887226144927 | repo: ipython | file_name: history.py | ast_levels: 12 | nloc: 15
url: https://github.com/ipython/ipython.git | complexity: 8 | token_counts: 118 | n_ast_errors: 0 | vocab_size: 68 | n_ast_nodes: 188 | language: Python
{ "docstring": "Prepares and runs an SQL query for the history database.\n\n Parameters\n ----------\n sql : str\n Any filtering expressions to go after SELECT ... FROM ...\n params : tuple\n Parameters passed to the SQL query (to replace \"?\")\n raw, output : bool\n See :meth:`get_range`\n latest : bool\n Select rows with max (session, line)\n\n Returns\n -------\n Tuples as :meth:`get_range`\n ", "language": "en", "n_whitespaces": 171, "n_words": 57, "vocab_size": 46 }
def _run_sql(self, sql, params, raw=True, output=False, latest=False): toget = 'source_raw' if raw else 'source' sqlfrom = "history" if output: sqlfrom = "history LEFT JOIN output_history USING (session, line)" toget = "history.%s, output_history.output" % toget if latest: toget += ", MAX(session * 128 * 1024 + line)" this_querry = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql cur = self.db.execute(this_querry, params) if latest: cur = (row[:-1] for row in cur) if output: # Regroup into 3-tuples, and parse JSON return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur) return cur
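A hedged sketch of the public API that sits on top of _run_sql; it reads the local IPython history database, so the printed rows depend on the machine it runs on.

from IPython.core.history import HistoryAccessor

hist = HistoryAccessor()  # opens the default profile's history.sqlite
for session, line, source in hist.get_tail(3, raw=True):
    print(session, line, source)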
29,172
130,247
56
python/ray/_private/thirdparty/pathspec/pathspec.py
13
7
def __add__(self, other):
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
__add__
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
pathspec.py
11
5
https://github.com/ray-project/ray.git
2
31
0
12
51
Python
{ "docstring": "\n Combines the :attr:`Pathspec.patterns` patterns from two\n :class:`PathSpec` instances.\n ", "language": "en", "n_whitespaces": 30, "n_words": 8, "vocab_size": 8 }
def __add__(self, other): if isinstance(other, PathSpec): return PathSpec(self.patterns + other.patterns) else: return NotImplemented
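A hedged sketch using the standalone pathspec package rather than Ray's vendored copy shown above; it assumes a pathspec release that ships PathSpec.__add__.

from pathspec import PathSpec

ignore_pyc = PathSpec.from_lines("gitwildmatch", ["*.pyc"])
ignore_logs = PathSpec.from_lines("gitwildmatch", ["*.log"])

combined = ignore_pyc + ignore_logs       # __add__ concatenates the two pattern lists
print(combined.match_file("module.pyc"))  # True
print(combined.match_file("debug.log"))   # True
print(combined.match_file("main.py"))     # False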
28,871
129,004
40
python/ray/node.py
8
5
def address(self): if use_gcs_for_bootstrap(): return self._gcs_address return self._redis_address
[GCS][Bootstrap n/n] Do not start Redis in GCS bootstrapping mode (#21232) After this change in GCS bootstrapping mode, Redis no longer starts and `address` is treated as the GCS address of the Ray cluster. Co-authored-by: Yi Cheng <[email protected]> Co-authored-by: Yi Cheng <[email protected]>
address
70db5c5592d94b611fee0a334414f1f4f5cc151a
ray
node.py
8
4
https://github.com/ray-project/ray.git
2
19
0
7
34
Python
{ "docstring": "Get the address for bootstrapping, e.g. the address to pass to\n `ray start` or `ray.int()` to start worker nodes, that has been\n converted to ip:port format.\n ", "language": "en", "n_whitespaces": 47, "n_words": 26, "vocab_size": 21 }
def address(self): if use_gcs_for_bootstrap(): return self._gcs_address return self._redis_address
9,863
49,675
619
modules/text/language_model/simnet_bow/module.py
149
51
def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1): if use_gpu: try: _places = os.environ["CUDA_VISIBLE_DEVICES"] int(_places[0]) except: raise RuntimeError( "Environment Variable CUDA_VISIBLE_DEVICES
Remove fluid api in modules and pkg. (#1906)
similarity
8468e1ac6cfe165aa1e3cf4f77ab6fb66ce98614
PaddleHub
module.py
15
42
https://github.com/PaddlePaddle/PaddleHub.git
6
363
0
106
590
Python
{ "docstring": "\n Get the sentiment prediction results results with the texts as input\n Args:\n texts(list): the input texts to be predicted which the first element is text_1(list)\n and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]\n if texts not data.\n data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted\n use_gpu(bool): whether use gpu to predict or not\n batch_size(int): the program deals once with one batch\n Returns:\n results(list): the word segmentation results\n ", "language": "en", "n_whitespaces": 214, "n_words": 75, "vocab_size": 51 }
def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1): if use_gpu: try: _places = os.environ["CUDA_VISIBLE_DEVICES"] int(_places[0]) except: raise RuntimeError( "Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id." ) data = self.check_data(texts, data) start_idx = 0 iteration = int(math.ceil(len(data['text_1']) / batch_size)) results = [] for i in range(iteration): batch_data = {'text_1': [], 'text_2': []} if i < (iteration - 1): batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)] batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)] else: batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)] batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)] start_idx = start_idx + batch_size processed_results = preprocess(self.word_seg_module, self.vocab, batch_data, use_gpu, batch_size) data_1, lod_1, shape_1 = self._texts_process(processed_results["text_1"]) data_2, lod_2, shape_2 = self._texts_process(processed_results["text_2"]) predictor = self.gpu_predictor if use_gpu else self.cpu_predictor input_names = predictor.get_input_names() input_handle = predictor.get_input_handle(input_names[0]) input_handle.copy_from_cpu(data_1) input_handle.set_lod(lod_1) input_handle.reshape(shape_1) input_handle = predictor.get_input_handle(input_names[1]) input_handle.copy_from_cpu(data_2) input_handle.set_lod(lod_2) input_handle.reshape(shape_2) predictor.run() output_names = predictor.get_output_names() output_handle = predictor.get_output_handle(output_names[1]) batch_out = output_handle.copy_to_cpu() batch_result = postprocess(batch_out, processed_results) results += batch_result return results
1,484
8,699
267
ludwig/collect.py
113
32
def cli_collect_weights(sys_argv): parser = argparse.ArgumentParser( description="This script loads a pretrained model " "and uses it collect weights.", prog="ludwig collect_weights", usage="%(prog)s [options]", ) # ---------------- # Model parameters # ---------------- parser.add_argument("-m", "--model_path", help="model to load", required=True) parser.add_argument("-t", "--tensors", help="tensors to collect", nargs="+", required=True) # ------------------------- # Output results parameters # ------------------------- parser.add_argument( "-od", "--output_directory", type=str, default="results", help="directory that contains the results" ) # ------------
[Annotations] Logging Level Registry (#2814) * Add DeveloperAPI annotations to some utils * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * remove annotations for private methods * [Annotations] Logging Level Registry Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
cli_collect_weights
6ee67ef2d2098d236e06d1d7672d92fc192c55b0
ludwig
collect.py
11
29
https://github.com/ludwig-ai/ludwig.git
3
202
0
88
365
Python
{ "docstring": "Command Line Interface to collecting the weights for the model.\n\n --m: Input model that is necessary to collect to the tensors, this is a\n required *option*\n --t: Tensors to collect\n --od: Output directory of the model, defaults to results\n --v: Verbose: Defines the logging level that the user will be exposed to\n ", "language": "en", "n_whitespaces": 75, "n_words": 52, "vocab_size": 39 }
def cli_collect_weights(sys_argv): parser = argparse.ArgumentParser( description="This script loads a pretrained model " "and uses it collect weights.", prog="ludwig collect_weights", usage="%(prog)s [options]", ) # ---------------- # Model parameters # ---------------- parser.add_argument("-m", "--model_path", help="model to load", required=True) parser.add_argument("-t", "--tensors", help="tensors to collect", nargs="+", required=True) # ------------------------- # Output results parameters # ------------------------- parser.add_argument( "-od", "--output_directory", type=str, default="results", help="directory that contains the results" ) # ------------------ # Runtime parameters # ------------------ parser.add_argument( "-l", "--logging_level", default="info", help="the level of logging to use", choices=["critical", "error", "warning", "info", "debug", "notset"], ) add_contrib_callback_args(parser) args = parser.parse_args(sys_argv) args.callbacks = args.callbacks or [] for callback in args.callbacks: callback.on_cmdline("collect_weights", *sys_argv) args.logging_level = get_logging_level_registry()[args.logging_level] logging.getLogger("ludwig").setLevel(args.logging_level) global logger logger = logging.getLogger("ludwig.collect") print_ludwig("Collect Weights", LUDWIG_VERSION) collect_weights(**vars(args))
14,093
66,051
14
erpnext/hr/doctype/daily_work_summary/daily_work_summary.py
20
9
def get_user_emails_from_group(group): group_doc = group if isinstance(group_doc, str): group_doc = frappe.g
style: format code with black
get_user_emails_from_group
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
daily_work_summary.py
11
6
https://github.com/frappe/erpnext.git
2
35
0
16
60
Python
{ "docstring": "Returns list of email of enabled users from the given group\n\n\t:param group: Daily Work Summary Group `name`", "language": "en", "n_whitespaces": 16, "n_words": 18, "vocab_size": 17 }
def get_user_emails_from_group(group): group_doc = group if isinstance(group_doc, str): group_doc = frappe.get_doc("Daily Work Summary Group", group) emails = get_users_email(group_doc) return emails
50,936
204,859
129
django/db/backends/base/operations.py
31
13
def adapt_unknown_value(self, value): if isinstance(value, datetime.datetime): #
Refs #33476 -- Reformatted code with Black.
adapt_unknown_value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
operations.py
10
11
https://github.com/django/django.git
5
80
0
22
127
Python
{ "docstring": "\n Transform a value to something compatible with the backend driver.\n\n This method only depends on the type of the value. It's designed for\n cases where the target type isn't known, such as .raw() SQL queries.\n As a consequence it may not work perfectly in all circumstances.\n ", "language": "en", "n_whitespaces": 82, "n_words": 46, "vocab_size": 41 }
def adapt_unknown_value(self, value): if isinstance(value, datetime.datetime): # must be before date return self.adapt_datetimefield_value(value) elif isinstance(value, datetime.date): return self.adapt_datefield_value(value) elif isinstance(value, datetime.time): return self.adapt_timefield_value(value) elif isinstance(value, decimal.Decimal): return self.adapt_decimalfield_value(value) else: return value
16,049
73,532
190
wagtail/contrib/settings/tests/test_templates.py
54
14
def test_get_settings_variable_assignment_request_context(self): request = self.get_request(site=self.other_site) context = Context({"request": request}) template = Template( "{% load wagtailsettings_tags %}" "{% get_settings as wagtai
Reformat with black
test_get_settings_variable_assignment_request_context
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_templates.py
11
15
https://github.com/wagtail/wagtail.git
1
74
0
35
137
Python
{ "docstring": "\n Check that assigning the setting to a context variable with\n {% get_settings as wagtail_settings %} works.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
def test_get_settings_variable_assignment_request_context(self): request = self.get_request(site=self.other_site) context = Context({"request": request}) template = Template( "{% load wagtailsettings_tags %}" "{% get_settings as wagtail_settings %}" "{{ wagtail_settings.tests.testsetting.title}}" ) self.assertEqual(template.render(context), self.other_site_settings.title) # Also check that the default 'settings' variable hasn't been set template = Template( "{% load wagtailsettings_tags %}" "{% get_settings as wagtail_settings %}" "{{ settings.tests.testsetting.title}}" ) self.assertEqual(template.render(context), "")
35,894
154,275
416
modin/core/io/column_stores/parquet_dispatcher.py
109
14
def get_dataset(cls, path, engine, storage_options): if engine == "auto": # We follow in concordance with pandas engine_classes =
FEAT-#4733: Support fastparquet as engine for `read_parquet` (#4807) Signed-off-by: Karthik Velayutham <[email protected]>
get_dataset
b240370bf83c88589d293b76b4a2409294e06f90
modin
parquet_dispatcher.py
16
24
https://github.com/modin-project/modin.git
6
103
0
82
193
Python
{ "docstring": "\n Retrieve Parquet engine specific Dataset implementation.\n\n Parameters\n ----------\n path : str, path object or file-like object\n The filepath of the parquet file in local filesystem or hdfs.\n engine : str\n Parquet library to use (only 'PyArrow' is supported for now).\n storage_options : dict\n Parameters for specific storage engine.\n\n Returns\n -------\n Dataset\n Either a PyArrowDataset or FastParquetDataset object.\n ", "language": "en", "n_whitespaces": 172, "n_words": 57, "vocab_size": 45 }
def get_dataset(cls, path, engine, storage_options): if engine == "auto": # We follow in concordance with pandas engine_classes = [PyArrowDataset, FastParquetDataset] error_msgs = "" for engine_class in engine_classes: try: return engine_class(path, storage_options) except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "A suitable version of " + "pyarrow or fastparquet is required for parquet " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" ) elif engine == "pyarrow": return PyArrowDataset(path, storage_options) elif engine == "fastparquet": return FastParquetDataset(path, storage_options) else: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
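A hedged sketch of the user-facing call this dispatcher serves; the file name is hypothetical, and a local Parquet file plus the chosen engine are assumed to be installed and available.

import modin.pandas as pd

df = pd.read_parquet("events.parquet", engine="fastparquet")  # "pyarrow" and "auto" are also accepted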
6,472
35,529
27
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
6
6
def test_causal_lm_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past
Fix tf.concatenate + test past_key_values for TF models (#15774) * fix wrong method name tf.concatenate * add tests related to causal LM / decoder * make style and quality * clean-up * Fix TFBertModel's extended_attention_mask when past_key_values is provided * Fix tests * fix copies * More tf.int8 -> tf.int32 in TF test template * clean-up * Update TF test template * revert the previous commit + update the TF test template * Fix TF template extended_attention_mask when past_key_values is provided * Fix some styles manually * clean-up * Fix ValueError: too many values to unpack in the test * Fix more: too many values to unpack in the test * Add a comment for extended_attention_mask when there is past_key_values * Fix TFElectra extended_attention_mask when past_key_values is provided * Add tests to other TF models * Fix for TF Electra test: add prepare_config_and_inputs_for_decoder * Fix not passing training arg to lm_head in TFRobertaForCausalLM * Fix tests (with past) for TF Roberta * add testing for pask_key_values for TFElectra model Co-authored-by: ydshieh <[email protected]>
test_causal_lm_model_past_with_attn_mask
8635407bc724c45142c1f91dbc9ef3ea681e1a56
transformers
test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
9
3
https://github.com/huggingface/transformers.git
1
24
0
6
43
Python
{ "docstring": "Test the causal LM model with `past_key_values` and `attention_mask`", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_causal_lm_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs)
57,097
223,838
65
python3.10.4/Lib/email/mime/audio.py
25
11
def _whatsnd(data): hdr = data[:512] fakefile = BytesIO(hdr) for testfn in sndhdr.tests: res = testfn(hdr, fakefile) if res is not None:
add python 3.10.4 for windows
_whatsnd
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
audio.py
12
8
https://github.com/XX-net/XX-Net.git
3
52
0
21
83
Python
{ "docstring": "Try to identify a sound file type.\n\n sndhdr.what() has a pretty cruddy interface, unfortunately. This is why\n we re-do it here. It would be easier to reverse engineer the Unix 'file'\n command and use the standard 'magic' file, as shipped with a modern Unix.\n ", "language": "en", "n_whitespaces": 58, "n_words": 44, "vocab_size": 40 }
def _whatsnd(data): hdr = data[:512] fakefile = BytesIO(hdr) for testfn in sndhdr.tests: res = testfn(hdr, fakefile) if res is not None: return _sndhdr_MIMEmap.get(res[0]) return None
110,574
311,921
281
tests/util/test_async.py
37
13
async def test_protect_loop_debugger_sleep(caplog): block_async_io.enable() with patch( "homeassistant.util.async_.extract_stack", return_value=[ Mock( filename="/home/paulus/homeassistant/.venv/blah/pydevd.py", lineno="23",
Don't warn on time.sleep injected by the debugger (#65420)
test_protect_loop_debugger_sleep
5a34feb7de440e0df748c9db500facc72a4c2646
core
test_async.py
15
24
https://github.com/home-assistant/core.git
1
84
0
31
148
Python
{ "docstring": "Test time.sleep injected by the debugger is not reported.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def test_protect_loop_debugger_sleep(caplog): block_async_io.enable() with patch( "homeassistant.util.async_.extract_stack", return_value=[ Mock( filename="/home/paulus/homeassistant/.venv/blah/pydevd.py", lineno="23", line="do_something()", ), Mock( filename="/home/paulus/homeassistant/util/async.py", lineno="123", line="protected_loop_func", ), Mock( filename="/home/paulus/homeassistant/util/async.py", lineno="123", line="check_loop()", ), ], ): time.sleep(0) assert "Detected blocking call inside the event loop" not in caplog.text
1,761
9,894
19
jina/peapods/pods/__init__.py
5
5
def update_worker_pea_args(self): self.peas_args['peas'] = self._set_peas_args(self.args)
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
update_worker_pea_args
933415bfa1f9eb89f935037014dfed816eb9815d
jina
__init__.py
9
2
https://github.com/jina-ai/jina.git
1
21
0
5
38
Python
{ "docstring": " Update args of all its worker peas based on Pod args. Does not touch head and tail", "language": "en", "n_whitespaces": 17, "n_words": 17, "vocab_size": 17 }
def update_worker_pea_args(self): self.peas_args['peas'] = self._set_peas_args(self.args)
48,297
197,040
261
sympy/ntheory/generate.py
88
11
def prevprime(n): n = _as_int_ceiling(n) if n < 3: raise ValueError("no preceding primes") if n < 8: return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n] if n <= sieve._list[-1]: l, u = sieve.search(n) if l == u: return sieve[l-1] else: return sieve[l] nn = 6*(n//6) if n - nn <= 1: n = nn - 1 if isprime(n): return n n -= 4 else: n = nn + 1 while 1: if isprime(n): return n n -= 2 if isprime(n):
Refactored import ordering in functions
prevprime
e0dc14eca132f37c5f49369eb4051eae37c9b119
sympy
generate.py
11
27
https://github.com/sympy/sympy.git
10
154
0
45
248
Python
{ "docstring": " Return the largest prime smaller than n.\n\n Notes\n =====\n\n Potential primes are located at 6*j +/- 1. This\n property is used during searching.\n\n >>> from sympy import prevprime\n >>> [(i, prevprime(i)) for i in range(10, 15)]\n [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)]\n\n See Also\n ========\n\n nextprime : Return the ith prime greater than n\n primerange : Generates all primes in a given range\n ", "language": "en", "n_whitespaces": 148, "n_words": 67, "vocab_size": 57 }
def prevprime(n): n = _as_int_ceiling(n) if n < 3: raise ValueError("no preceding primes") if n < 8: return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n] if n <= sieve._list[-1]: l, u = sieve.search(n) if l == u: return sieve[l-1] else: return sieve[l] nn = 6*(n//6) if n - nn <= 1: n = nn - 1 if isprime(n): return n n -= 4 else: n = nn + 1 while 1: if isprime(n): return n n -= 2 if isprime(n): return n n -= 4
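A few quick checks of the behaviour described in the docstring above, using sympy's public API; the expected values were verified by hand.

from sympy import prevprime

assert prevprime(7) == 5       # answered by the small-n lookup table
assert prevprime(10) == 7      # answered from the prime sieve
assert prevprime(1000) == 997  # found by stepping down through 6*k +/- 1 candidates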
106,525
307,759
447
tests/components/recorder/test_statistics.py
117
32
def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog): hass = hass_recorder() wait_recording_done(hass) period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "state_unit_of_measurement": "kWh", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_1 = [ { "start": period1, "last_reset": None, "state": 3, "sum": 5, }, ] external_energy_statistics_2 = [ { "start": period2, "last_reset": None, "state": 3, "sum": 6, } ] with patch.object( statistics, "_statistics_exists", return_value=False ), patch.object( statistics, "_insert_statistics", wraps=statistics._insert_statistics ) as insert_statistics_mock: async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) as
Display statistics in the source's unit (#78031)
test_duplicate_statistics_handle_integrity_error
dd20a7ea62fc003748c5f0cf99be25c69c9b5a05
core
test_statistics.py
14
50
https://github.com/home-assistant/core.git
1
224
0
79
387
Python
{ "docstring": "Test the recorder does not blow up if statistics is duplicated.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog): hass = hass_recorder() wait_recording_done(hass) period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "state_unit_of_measurement": "kWh", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_1 = [ { "start": period1, "last_reset": None, "state": 3, "sum": 5, }, ] external_energy_statistics_2 = [ { "start": period2, "last_reset": None, "state": 3, "sum": 6, } ] with patch.object( statistics, "_statistics_exists", return_value=False ), patch.object( statistics, "_insert_statistics", wraps=statistics._insert_statistics ) as insert_statistics_mock: async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_2 ) wait_recording_done(hass) assert insert_statistics_mock.call_count == 3 with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.Statistics).all() assert len(tmp) == 2 assert "Blocked attempt to insert duplicated statistic rows" in caplog.text
29,587
131,786
799
python/ray/tests/test_resource_demand_scheduler.py
130
35
def testRequestResourcesRaceConditionWithResourceDemands(self): config = copy.deepcopy(MULTI_WORKER_CLUSTER) config["available_node_types"].update( { "empty_node":
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
testRequestResourcesRaceConditionWithResourceDemands
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
test_resource_demand_scheduler.py
14
61
https://github.com/ray-project/ray.git
3
310
0
78
521
Python
{ "docstring": "Test request_resources() with resource_demands.\n\n Tests when request_resources() is called simultaneously with resource\n demands in multiple orders.\n ", "language": "en", "n_whitespaces": 37, "n_words": 16, "vocab_size": 14 }
def testRequestResourcesRaceConditionWithResourceDemands(self): config = copy.deepcopy(MULTI_WORKER_CLUSTER) config["available_node_types"].update( { "empty_node": { "node_config": {}, "resources": {"CPU": 2, "GPU": 1}, "max_workers": 1, }, "def_worker": { "node_config": {}, "resources": {"CPU": 2, "GPU": 1, "WORKER": 1}, "max_workers": 3, }, } ) config["idle_timeout_minutes"] = 0 config_path = self.write_config(config) self.provider = MockProvider() self.provider.create_node( {}, { TAG_RAY_NODE_KIND: "head", TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, TAG_RAY_USER_NODE_TYPE: "empty_node", }, 1, ) runner = MockProcessRunner() runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)]) lm = LoadMetrics() autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, ) lm.update( "127.0.0.0", mock_raylet_id(), {"CPU": 2, "GPU": 1}, {"CPU": 2}, {}, waiting_bundles=[{"CPU": 2}], ) autoscaler.load_metrics.set_resource_requests([{"CPU": 2, "GPU": 1}] * 2) autoscaler.update() # 1 head, 1 worker. self.waitForNodes(2) lm.update( "127.0.0.0", mock_raylet_id(), {"CPU": 2, "GPU": 1}, {"CPU": 2}, {}, waiting_bundles=[{"CPU": 2}], ) # make sure it stays consistent. for _ in range(10): autoscaler.update() self.waitForNodes(2)
17,397
82,431
279
cms/tests/test_sitemap.py
56
30
def test_sitemap_unpublished_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() unpublished_titles = set() for item in urlset: locations.append(item['location']) for page in Page.objects.drafts(): if page.get_public_object(): set1 = set(page.get_public_object().title_set.values_list('path', flat=True)) set2 = set(page.title_set.values_list('path', flat=True)) unpublished_titles.update(set2.difference(set1)) else: unpublished_titles.update(page.title_set.values_list('path', flat=True)) for path in unpublished_titles: title = Title.objects.get(path=path) if title.path: url = f'http://example.com/{title.language}/{title.path}/' else:
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
test_sitemap_unpublished_titles
c1290c9ff89cb00caa5469129fd527e9d82cd820
django-cms
test_sitemap.py
17
21
https://github.com/django-cms/django-cms.git
6
167
0
38
308
Python
{ "docstring": "\n Check that titles attached to unpublished pages are not in the urlset.\n As titles are 'published' depending on their attached page, we create a\n set of unpublished titles by checking titles attached to the draft and\n public version of each page\n ", "language": "en", "n_whitespaces": 77, "n_words": 41, "vocab_size": 31 }
def test_sitemap_unpublished_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() unpublished_titles = set() for item in urlset: locations.append(item['location']) for page in Page.objects.drafts(): if page.get_public_object(): set1 = set(page.get_public_object().title_set.values_list('path', flat=True)) set2 = set(page.title_set.values_list('path', flat=True)) unpublished_titles.update(set2.difference(set1)) else: unpublished_titles.update(page.title_set.values_list('path', flat=True)) for path in unpublished_titles: title = Title.objects.get(path=path) if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' self.assertFalse(url in locations)
2,555
13,120
543
jina/parsers/orchestrate/runtimes/remote.py
160
22
def mixin_gateway_parser(parser): gp = add_arg_group(parser, title='Gateway') _add_host(gp) _add_proxy(gp) gp.add_argument( '--uses', type=str, default=None, # TODO: add Jina Hub Gateway help=, ) gp.add_argument( '--uses-with', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--py-modules', type=str, nargs='*', metavar='PATH', help=, ) mixin_base_runtime_parser(gp) gp.add_argument( '--port-expose', type=int, dest='port'
feat: allow passing custom gateway in Flow (#5189)
mixin_gateway_parser
cdaf7f87ececf9e13b517379ca183b17f0d7b007
jina
remote.py
10
87
https://github.com/jina-ai/jina.git
1
237
0
108
404
Python
{ "docstring": "Add the options for remote expose at the Gateway\n :param parser: the parser\n \n The config of the gateway, it could be one of the followings:\n * the string literal of an Gateway class name\n * a Gateway YAML file (.yml, .yaml, .jaml)\n * a docker image (must start with `docker://`)\n * the string literal of a YAML config (must start with `!` or `jtype: `)\n * the string literal of a JSON config\n\n When use it under Python, one can use the following values additionally:\n - a Python dict that represents the config\n - a text file stream has `.read()` interface\n \n Dictionary of keyword arguments that will override the `with` configuration in `uses`\n \nThe customized python modules need to be imported before loading the gateway\n\nNote that the recommended way is to only import a single module - a simple python file, if your\ngateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,\nwhich should be structured as a python package.\n", "language": "en", "n_whitespaces": 249, "n_words": 169, "vocab_size": 102 }
def mixin_gateway_parser(parser): gp = add_arg_group(parser, title='Gateway') _add_host(gp) _add_proxy(gp) gp.add_argument( '--uses', type=str, default=None, # TODO: add Jina Hub Gateway help=, ) gp.add_argument( '--uses-with', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--py-modules', type=str, nargs='*', metavar='PATH', help=, ) mixin_base_runtime_parser(gp) gp.add_argument( '--port-expose', type=int, dest='port', default=helper.random_port(), help='The port that the gateway exposes for clients for GRPC connections.', ) parser.add_argument( '--graph-description', type=str, help='Routing graph for the gateway', default='{}', ) parser.add_argument( '--graph-conditions', type=str, help='Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.', default='{}', ) parser.add_argument( '--deployments-addresses', type=str, help='dictionary JSON with the input addresses of each Deployment', default='{}', ) parser.add_argument( '--deployments-disable-reduce', type=str, help='list JSON disabling the built-in merging mechanism for each Deployment listed', default='[]', ) gp.add_argument( '--compression', choices=['NoCompression', 'Deflate', 'Gzip'], help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, ' 'check https://grpc.github.io/grpc/python/grpc.html#compression.', ) gp.add_argument( '--timeout-send', type=int, default=None, help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default', )
56,687
222,649
211
python3.10.4/Lib/distutils/command/bdist_rpm.py
50
9
def _format_changelog(self, changelog): if not changelog: return changelog new_changelog = [] for line in changelog.strip().split('\n'): line = line.strip() if line[0] == '*': new_changelog.extend(['', line]) elif line[0] == '-': new_changelog.append(line) else: new_changelog.append(' ' + line) # strip trailing newline inserted by first changelog entry if not new_changelog[0]: del new_changelog[0] return ne
add python 3.10.4 for windows
_format_changelog
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bdist_rpm.py
14
15
https://github.com/XX-net/XX-Net.git
6
95
0
40
165
Python
{ "docstring": "Format the changelog correctly and convert it to a list of strings\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
def _format_changelog(self, changelog): if not changelog: return changelog new_changelog = [] for line in changelog.strip().split('\n'): line = line.strip() if line[0] == '*': new_changelog.extend(['', line]) elif line[0] == '-': new_changelog.append(line) else: new_changelog.append(' ' + line) # strip trailing newline inserted by first changelog entry if not new_changelog[0]: del new_changelog[0] return new_changelog
44,221
183,499
55
src/textual/_animator.py
27
5
def _get_time(self) -> float: # N.B. We could remove this method and always call `self._timer.get_time()
[App] Finally, time mocking in tests seems to be working! 😅 I had to add a flag in the `_timer` module that allows us to completely disable the "skip" feature of Timers, though - but it shouldn't cause too much trouble 🤞
_get_time
15df75919744fbea824bbf029cfb56029a3d0dc8
textual
_animator.py
8
3
https://github.com/Textualize/textual.git
1
16
0
26
31
Python
{ "docstring": "Get the current wall clock time, via the internal Timer.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def _get_time(self) -> float: # N.B. We could remove this method and always call `self._timer.get_time()` internally, # but it's handy to have in mocking situations return self._timer.get_time()
43,607
181,829
683
tpot/base.py
131
34
def _generate(self, pset, min_, max_, condition, type_=None): if type_ is None: type_ = pset.ret expr = [] height = np.random.randint(min_, max_) stack = [(0, type_)] while len(stack) != 0: depth, type_ = stack.pop() # We've added a type_ parameter to the condition function
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
_generate
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
base.py
19
35
https://github.com/EpistasisLab/tpot.git
8
221
0
83
357
Python
{ "docstring": "Generate a Tree as a list of lists.\n\n The tree is build from the root to the leaves, and it stop growing when\n the condition is fulfilled.\n\n Parameters\n ----------\n pset: PrimitiveSetTyped\n Primitive set from which primitives are selected.\n min_: int\n Minimum height of the produced trees.\n max_: int\n Maximum height of the produced trees.\n condition: function\n The condition is a function that takes two arguments,\n the height of the tree to build and the current\n depth in the tree.\n type_: class\n The type that should return the tree when called, when\n :obj:None (default) no return type is enforced.\n\n Returns\n -------\n individual: list\n A grown tree with leaves at possibly different depths\n depending on the condition function.\n ", "language": "en", "n_whitespaces": 317, "n_words": 116, "vocab_size": 75 }
def _generate(self, pset, min_, max_, condition, type_=None): if type_ is None: type_ = pset.ret expr = [] height = np.random.randint(min_, max_) stack = [(0, type_)] while len(stack) != 0: depth, type_ = stack.pop() # We've added a type_ parameter to the condition function if condition(height, depth, type_): try: term = np.random.choice(pset.terminals[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( "The gp.generate function tried to add " "a terminal of type {}, but there is" "none available. {}".format(type_, traceback) ) if inspect.isclass(term): term = term() expr.append(term) else: try: prim = np.random.choice(pset.primitives[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( "The gp.generate function tried to add " "a primitive of type {}, but there is" "none available. {}".format(type_, traceback) ) expr.append(prim) for arg in reversed(prim.args): stack.append((depth + 1, arg)) return expr
@pytest.mark.parametrize("kwargs", [{"min_frequency": 21, "max_categories": 1}])
75,664
259,230
484
sklearn/preprocessing/tests/test_encoders.py
252
31
def test_ohe_infrequent_multiple_categories_dtypes(): pd = pytest.importorskip("pandas") X = pd.DataFrame( { "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], }, columns=["str", "int"], ) ohe = OneHotEncoder( categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" ) # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be # considered infrequent because they are greater # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. # 0, 3, 12 will be considered infrequent X_trans = ohe.fit_transform(X).toarray() assert_array_equal(ohe.infrequent_categories_[0], ["a", "b"]) assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) expected = [ [0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0], ] assert_allclose(expected, X_trans) X_test = pd.DataFrame({"str": ["b", "f"], "int": [14, 12]}, columns=["str", "int"]) expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] X_test_trans = ohe.transform(X_test) assert_allclose(expected, X_test_trans.toarray()) X_inv = ohe.inverse_transform(X_test_trans) expected_inv
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
test_ohe_infrequent_multiple_categories_dtypes
7f0006c8aad1a09621ad19c3db19c3ff0555a183
scikit-learn
test_encoders.py
12
46
https://github.com/scikit-learn/scikit-learn.git
1
510
1
119
782
Python
{ "docstring": "Test infrequent categories with a pandas dataframe with multiple dtypes.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def test_ohe_infrequent_multiple_categories_dtypes(): pd = pytest.importorskip("pandas") X = pd.DataFrame( { "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], }, columns=["str", "int"], ) ohe = OneHotEncoder( categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" ) # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be # considered infrequent because they are greater # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. # 0, 3, 12 will be considered infrequent X_trans = ohe.fit_transform(X).toarray() assert_array_equal(ohe.infrequent_categories_[0], ["a", "b"]) assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) expected = [ [0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0], ] assert_allclose(expected, X_trans) X_test = pd.DataFrame({"str": ["b", "f"], "int": [14, 12]}, columns=["str", "int"]) expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] X_test_trans = ohe.transform(X_test) assert_allclose(expected, X_test_trans.toarray()) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [["infrequent_sklearn", "infrequent_sklearn"], ["f", "infrequent_sklearn"]], dtype=object, ) assert_array_equal(expected_inv, X_inv) # only infrequent or known categories X_test = pd.DataFrame({"str": ["c", "b"], "int": [12, 5]}, columns=["str", "int"]) X_test_trans = ohe.transform(X_test).toarray() expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]] assert_allclose(expected, X_test_trans) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [["c", "infrequent_sklearn"], ["infrequent_sklearn", 5]], dtype=object ) assert_array_equal(expected_inv, X_inv) @pytest.mark.parametrize("kwargs", [{"min_frequency": 21, "max_categories": 1}])
7,261
39,805
157
dash/development/base_component.py
32
15
def _set_random_id(self): if getattr(self, "persistence", False): raise RuntimeError( ) if "dash_snapshots" in sys.modules: raise RuntimeError( ) if not hasattr(self, "id"): v = str(uuid.UUID(int=rd.randint(0, 2 ** 128))) setattr(self, "id", v) return getattr(self, "id")
error when autogenerated IDs are used with persistence or snapshots also give set_random_id a leading underscore so it doesn't need to become a reserved word (disallowed prop name)
_set_random_id
41e322bd17bcbaa34e315b27b8f33f07e6671142
dash
base_component.py
16
26
https://github.com/plotly/dash.git
4
78
0
26
133
Python
{ "docstring": "\n Attempting to use an auto-generated ID with the `persistence` prop.\n This is prohibited because persistence is tied to component IDs and\n auto-generated IDs can easily change.\n\n Please assign an explicit ID to this component.\n \n Attempting to use an auto-generated ID in an app with `dash_snapshots`.\n This is prohibited because snapshots saves the whole app layout,\n including component IDs, and auto-generated IDs can easily change.\n Callbacks referencing the new IDs will not work old snapshots.\n\n Please assign an explicit ID to this component.\n ", "language": "en", "n_whitespaces": 241, "n_words": 82, "vocab_size": 44 }
def _set_random_id(self): if getattr(self, "persistence", False): raise RuntimeError( ) if "dash_snapshots" in sys.modules: raise RuntimeError( ) if not hasattr(self, "id"): v = str(uuid.UUID(int=rd.randint(0, 2 ** 128))) setattr(self, "id", v) return getattr(self, "id")
7,004
38,638
36
src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py
15
3
def final(): head = [] head.append(("layernorm.weight", "norm.weight")) head.append(("layernorm.bias", "norm.bias")) head.append(("cl
Add CvT (#17299) * Adding cvt files * Adding cvt files * changes in init file * Adding cvt files * changes in init file * Style fixes * Address comments from code review * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Format lists in docstring * Fix copies * Apply suggestion from code review Co-authored-by: AnugunjNaman <[email protected]> Co-authored-by: Ayushman Singh <[email protected]> Co-authored-by: Niels Rogge <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
final
adc0ff25028d29af30386f2d7d3f85e290fbef57
transformers
convert_cvt_original_pytorch_checkpoint_to_pytorch.py
9
7
https://github.com/huggingface/transformers.git
1
51
0
14
98
Python
{ "docstring": "\n Function helps in renaming final classification layer\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def final(): head = [] head.append(("layernorm.weight", "norm.weight")) head.append(("layernorm.bias", "norm.bias")) head.append(("classifier.weight", "head.weight")) head.append(("classifier.bias", "head.bias")) return head
75,191
258,142
50
test/document_stores/test_sql.py
15
9
def test_delete_index(self, ds, documents): ds.write_documents(documents, index="custom_index") assert ds.get_document_count(index="custom_index") == len(documents) ds.delete_index(index="custom_index") assert ds.get_document_count(index="custom_index") == 0
feat: add SQLDocumentStore tests (#3517) * port SQL tests * cleanup document_store_tests.py from sql tests * leftover * Update .github/workflows/tests.yml Co-authored-by: Sara Zan <[email protected]> * review comments * Update test/document_stores/test_base.py Co-authored-by: bogdankostic <[email protected]> Co-authored-by: Sara Zan <[email protected]> Co-authored-by: bogdankostic <[email protected]>
test_delete_index
2bb81331b75aec68de0d45c4cb116170d265f1fe
haystack
test_sql.py
10
5
https://github.com/deepset-ai/haystack.git
1
53
0
12
92
Python
{ "docstring": "Contrary to other Document Stores, SQLDocumentStore doesn't raise if the index is empty", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def test_delete_index(self, ds, documents): ds.write_documents(documents, index="custom_index") assert ds.get_document_count(index="custom_index") == len(documents) ds.delete_index(index="custom_index") assert ds.get_document_count(index="custom_index") == 0
@frappe.whitelist()
14,118
66,161
25
erpnext/hr/doctype/leave_application/leave_application.py
44
20
def add_holidays(events, start, end, employee, company): applicable_holiday_list = get_holiday_list_for_employee(employee, company) if not applicable_holiday_list: return for holiday in
style: format code with black
add_holidays
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
leave_application.py
16
19
https://github.com/frappe/erpnext.git
3
96
1
41
165
Python
{ "docstring": "select name, holiday_date, description\n\t\tfrom `tabHoliday` where parent=%s and holiday_date between %s and %s", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 12 }
def add_holidays(events, start, end, employee, company): applicable_holiday_list = get_holiday_list_for_employee(employee, company) if not applicable_holiday_list: return for holiday in frappe.db.sql( , (applicable_holiday_list, start, end), as_dict=True, ): events.append( { "doctype": "Holiday", "from_date": holiday.holiday_date, "to_date": holiday.holiday_date, "title": _("Holiday") + ": " + cstr(holiday.description), "name": holiday.name, } ) @frappe.whitelist()
42,899
179,091
317
xlib/api/win32/dshow/helper.py
82
38
def get_video_input_devices_names() -> List[str]: # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device names = [] sys_dev_enum = strmif.ICreateDevEnum() if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS: pEnumCat = objidl.IEnumMoniker() if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS: moniker = objidl.IMoniker() while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS: prop_bag = oaidl.IPropertyBag() if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS: var = wintypes.VARIANT() hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None ) if hr != wintypes.ERROR.SUCCESS: hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None ) names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SU
update xlib.api.win32
get_video_input_devices_names
2be32787538f1b0ef83f648ee60d2d4d4868d3fd
DeepFaceLive
helper.py
21
25
https://github.com/iperov/DeepFaceLive.git
7
230
0
55
363
Python
{ "docstring": "\n returns a list of available names of VideoInputDevice's\n\n ole32 should be initialized before use\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 13 }
def get_video_input_devices_names() -> List[str]: # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device names = [] sys_dev_enum = strmif.ICreateDevEnum() if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS: pEnumCat = objidl.IEnumMoniker() if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS: moniker = objidl.IMoniker() while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS: prop_bag = oaidl.IPropertyBag() if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS: var = wintypes.VARIANT() hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None ) if hr != wintypes.ERROR.SUCCESS: hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None ) names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SUCCESS else 'unnamed') prop_bag.Release() moniker.Release() pEnumCat.Release() sys_dev_enum.Release() return names
5,565
30,421
15
spotdl/utils/console.py
6
4
def check_for_updates(): version_message = get_update_status() print(version_message)
moved console actions to a new file
check_for_updates
deca40c2e26afed62e1f9ec4be14aff9e125929b
spotify-downloader
console.py
8
3
https://github.com/spotDL/spotify-downloader.git
1
14
0
6
28
Python
{ "docstring": "\n Check for updates to the current version.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def check_for_updates(): version_message = get_update_status() print(version_message)
75,544
259,052
491
sklearn/preprocessing/_polynomial.py
101
24
def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None): if knots == "quantile": percentiles = 100 * np.linspace( start=0, stop=1, num=n_knots, dtype=np.float64 ) if sample_weight is None: knots = np.percentile(X, percentiles, axis=0) else: knots = np.array( [ _weighted_percentile(X, sample_weight, percentile) for percentile in percentiles ] ) else: # knots == 'uniform': # Note that the variable `knots` has already been validated an
MNT Clean fixes and compat for old versions of our dependencies (#22642) Co-authored-by: Olivier Grisel <[email protected]>
_get_base_knot_positions
34f9dbf54164e3c62d68765fe45f27f067a45562
scikit-learn
_polynomial.py
16
26
https://github.com/scikit-learn/scikit-learn.git
5
172
0
72
259
Python
{ "docstring": "Calculate base knot positions.\n\n Base knots such that first knot <= feature <= last knot. For the\n B-spline construction with scipy.interpolate.BSpline, 2*degree knots\n beyond the base interval are added.\n\n Returns\n -------\n knots : ndarray of shape (n_knots, n_features), dtype=np.float64\n Knot positions (points) of base interval.\n ", "language": "en", "n_whitespaces": 105, "n_words": 45, "vocab_size": 37 }
def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None): if knots == "quantile": percentiles = 100 * np.linspace( start=0, stop=1, num=n_knots, dtype=np.float64 ) if sample_weight is None: knots = np.percentile(X, percentiles, axis=0) else: knots = np.array( [ _weighted_percentile(X, sample_weight, percentile) for percentile in percentiles ] ) else: # knots == 'uniform': # Note that the variable `knots` has already been validated and # `else` is therefore safe. # Disregard observations with zero weight. mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 x_min = np.amin(X[mask], axis=0) x_max = np.amax(X[mask], axis=0) knots = np.linspace( start=x_min, stop=x_max, num=n_knots, endpoint=True, dtype=np.float64, ) return knots
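Note: a plain-NumPy sketch of the two knot strategies described in the docstring above — knots placed at evenly spaced quantiles of the data versus a uniform grid over [min, max]. Variable names are illustrative, not part of scikit-learn's API.

import numpy as np

X = np.random.default_rng(0).exponential(size=(100, 1))  # skewed toy feature
n_knots = 5

# knots="quantile": knots are denser where the data is dense
percentiles = 100 * np.linspace(0, 1, n_knots)
quantile_knots = np.percentile(X, percentiles, axis=0)

# knots="uniform": evenly spaced between min and max regardless of density
uniform_knots = np.linspace(X.min(axis=0), X.max(axis=0), n_knots)

print(quantile_knots.ravel())
print(uniform_knots.ravel())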
81,090
273,161
102
keras/layers/preprocessing/index_lookup.py
27
16
def _num_tokens(self, data):
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_num_tokens
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
index_lookup.py
13
9
https://github.com/keras-team/keras.git
3
71
0
20
113
Python
{ "docstring": "Count the number of tokens in a ragged, sparse or dense tensor.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def _num_tokens(self, data): if tf_utils.is_sparse(data): flat_values = data.values elif tf_utils.is_ragged(data): flat_values = data.flat_values else: flat_values = tf.reshape(data, [-1]) tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64) return tokens, counts
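Note: a small sketch of the token-counting op the method above relies on, tf.unique_with_counts applied to a flattened dense tensor; the input values are made up.

import tensorflow as tf

data = tf.constant([["a", "b", "a"], ["c", "a", "b"]])
flat = tf.reshape(data, [-1])                       # flatten before counting
tokens, _, counts = tf.unique_with_counts(flat, out_idx=tf.int64)
print(tokens.numpy(), counts.numpy())               # [b'a' b'b' b'c'] [3 2 1]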
@pytest.fixture(name="awair_offline", scope="session")
102,571
303,762
11
tests/components/awair/conftest.py
6
8
def no_devicess_fixture(): return jso
Add Awair Local API support (#75535)
no_devicess_fixture
ebbff7b60e43f17d65ead811d314602b9daddfc4
core
conftest.py
10
2
https://github.com/home-assistant/core.git
1
15
1
6
54
Python
{ "docstring": "Fixture representing when no devices are found in Awair's cloud API.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def no_devicess_fixture(): return json.loads(load_fixture("awair/no_devices.json")) @pytest.fixture(name="awair_offline", scope="session")
@pytest.mark.parametrize( "values, exp_any, exp_all, exp_any_noskip, exp_all_noskip", [ ([True, pd.NA], True, True, True, pd.NA), ([False, pd.NA], False, False, pd.NA, False), ([pd.NA], False, True, pd.NA, pd.NA), ([], False, True, False, True), # GH-33253: all True / all False values buggy with skipna=False ([True, True], True, True, True, True), ([False, False], False, False, False, False), ], )
39,876
166,944
155
pandas/tests/arrays/boolean/test_reduction.py
76
10
def data(): return pd.array( [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], dtyp
DOC: Added docstrings to fixtures defined in array module (#47211)
data
89be1f053b695c4ce1c0569f737caf3f03c12128
pandas
test_reduction.py
13
5
https://github.com/pandas-dev/pandas.git
1
49
1
44
223
Python
{ "docstring": "Fixture returning boolean array, with valid and missing values.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def data(): return pd.array( [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], dtype="boolean", ) @pytest.mark.parametrize( "values, exp_any, exp_all, exp_any_noskip, exp_all_noskip", [ ([True, pd.NA], True, True, True, pd.NA), ([False, pd.NA], False, False, pd.NA, False), ([pd.NA], False, True, pd.NA, pd.NA), ([], False, True, False, True), # GH-33253: all True / all False values buggy with skipna=False ([True, True], True, True, True, True), ([False, False], False, False, False, False), ], )
20,477
101,038
92
scripts/train.py
19
8
def should_toggle_mask(self) -> bool: with self._lock: retval = self._toggle_mask if retval: logger.debug("Sending toggle mask") self._toggle_mask = False
Live Preview - Replace cv2 with matplotlib viewer
should_toggle_mask
7b9fc0454d982a2425ec44e90e5b05a87d149953
faceswap
train.py
12
14
https://github.com/deepfakes/faceswap.git
2
34
0
16
62
Python
{ "docstring": " Check whether the mask should be toggled and return the value. If ``True`` is returned\n then resets :attr:`_toggle_mask` back to ``False``\n\n Returns\n -------\n bool\n ``True`` if the mask should be toggled otherwise ``False``. ", "language": "en", "n_whitespaces": 73, "n_words": 33, "vocab_size": 26 }
def should_toggle_mask(self) -> bool: with self._lock: retval = self._toggle_mask if retval: logger.debug("Sending toggle mask") self._toggle_mask = False return retval
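Note: a self-contained sketch of the check-and-reset-under-a-lock idiom used above, so a toggle request is consumed at most once even with concurrent callers. The Toggle class is illustrative only.

import threading

class Toggle:
    def __init__(self):
        self._lock = threading.Lock()
        self._flag = False

    def set(self):
        with self._lock:
            self._flag = True

    def consume(self) -> bool:
        # Atomically read the flag and reset it so each toggle is seen once.
        with self._lock:
            value, self._flag = self._flag, False
        return value

t = Toggle()
t.set()
print(t.consume(), t.consume())  # True False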
75,964
259,877
106
examples/linear_model/plot_tweedie_regression_insurance_claims.py
57
27
def load_mtpl2(n_samples=100000): # freMTPL2freq dataset
ENH improve ARFF parser using pandas (#21938) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Adrin Jalali <[email protected]>
load_mtpl2
a47d569e670fd4102af37c3165c9b1ddf6fd3005
scikit-learn
plot_tweedie_regression_insurance_claims.py
12
11
https://github.com/scikit-learn/scikit-learn.git
2
145
0
43
242
Python
{ "docstring": "Fetch the French Motor Third-Party Liability Claims dataset.\n\n Parameters\n ----------\n n_samples: int, default=100000\n number of samples to select (for faster run time). Full dataset has\n 678013 samples.\n ", "language": "en", "n_whitespaces": 49, "n_words": 27, "vocab_size": 27 }
def load_mtpl2(n_samples=100000): # freMTPL2freq dataset from https://www.openml.org/d/41214 df_freq = fetch_openml(data_id=41214, as_frame=True, parser="pandas").data df_freq["IDpol"] = df_freq["IDpol"].astype(int) df_freq.set_index("IDpol", inplace=True) # freMTPL2sev dataset from https://www.openml.org/d/41215 df_sev = fetch_openml(data_id=41215, as_frame=True, parser="pandas").data # sum ClaimAmount over identical IDs df_sev = df_sev.groupby("IDpol").sum() df = df_freq.join(df_sev, how="left") df["ClaimAmount"].fillna(0, inplace=True) # unquote string fields for column_name in df.columns[df.dtypes.values == object]: df[column_name] = df[column_name].str.strip("'") return df.iloc[:n_samples]
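Note: a tiny pandas sketch of the join pattern in the loader above — per-policy frequency rows joined with claim amounts summed per policy, missing claims filled with 0. The rows here are synthetic, not the OpenML data.

import pandas as pd

df_freq = pd.DataFrame(
    {"IDpol": [1, 2, 3], "Exposure": [0.5, 1.0, 0.7]}
).set_index("IDpol")
df_sev = pd.DataFrame({"IDpol": [1, 1, 3], "ClaimAmount": [100.0, 50.0, 200.0]})

df_sev = df_sev.groupby("IDpol").sum()           # one row per policy
df = df_freq.join(df_sev, how="left")
df["ClaimAmount"] = df["ClaimAmount"].fillna(0)  # policies with no claims
print(df)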
20,043
100,579
100
lib/gpu_stats/nvidia.py
32
10
def _get_driver(self) -> str: try: driver = pynvml.nvmlSystemGetDriverVersion().decode("utf-8") except pynvml.NVMLError as err: self._log("debug", f"Unable to obtain driver. Original error: {str(err)}") driver = "No Nvidia driver found" self._log("debug", f"GPU Driver: {driver}") return driver
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
_get_driver
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
faceswap
nvidia.py
14
15
https://github.com/deepfakes/faceswap.git
2
52
0
27
109
Python
{ "docstring": " Obtain the Nvidia driver version currently in use.\n\n Returns\n -------\n str\n The current GPU driver version\n ", "language": "en", "n_whitespaces": 56, "n_words": 16, "vocab_size": 14 }
def _get_driver(self) -> str: try: driver = pynvml.nvmlSystemGetDriverVersion().decode("utf-8") except pynvml.NVMLError as err: self._log("debug", f"Unable to obtain driver. Original error: {str(err)}") driver = "No Nvidia driver found" self._log("debug", f"GPU Driver: {driver}") return driver
50,862
204,734
98
django/core/serializers/__init__.py
29
9
def _load_serializers(): global _serializers serializers = {} for forma
Refs #33476 -- Reformatted code with Black.
_load_serializers
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
__init__.py
13
11
https://github.com/django/django.git
4
58
0
22
91
Python
{ "docstring": "\n Register built-in and settings-defined serializers. This is done lazily so\n that user code has a chance to (e.g.) set up custom settings without\n needing to be careful of import order.\n ", "language": "en", "n_whitespaces": 43, "n_words": 30, "vocab_size": 29 }
def _load_serializers(): global _serializers serializers = {} for format in BUILTIN_SERIALIZERS: register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) if hasattr(settings, "SERIALIZATION_MODULES"): for format in settings.SERIALIZATION_MODULES: register_serializer( format, settings.SERIALIZATION_MODULES[format], serializers ) _serializers = serializers
53,626
213,075
66
samtranslator/utils/py27hash_fix.py
12
6
def pop(self): if self.keyorder: value = self.keys()[0] self.remove(value) return value return N
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
pop
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
serverless-application-model
py27hash_fix.py
11
6
https://github.com/aws/serverless-application-model.git
2
31
0
10
53
Python
{ "docstring": "\n Pops the top element from the sorted keys if it exists. Returns None otherwise.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
def pop(self): if self.keyorder: value = self.keys()[0] self.remove(value) return value return None
40,862
173,550
349
magenta/models/onsets_frames_transcription/infer_util.py
167
29
def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5): n, d = onset_probs.shape loss_matrix = np.zeros([n, d, 2], dtype=float) path_matrix = np.zeros([n, d, 2], dtype=bool) frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs, frame_probs], axis=-1)) onset_losses = alpha * -np.log(np.stack([1 - onset_probs, onset_probs], axis=-1)) loss_matrix[0, :, :] = frame_losses[0, :, :] + onset_losses[0, :, :] for i in range(1, n): transition_loss = np.tile(loss_matrix[i - 1, :, :][:, :, np.newaxis], [1, 1, 2]) transition_loss[:, 0, 0] += onset_losses[i, :, 0] transition_loss[:, 0, 1] += onset_losses[i, :, 1] transition_loss[:, 1, 0] += onset_losses[i, :, 0] transition_loss[:, 1, 1] += onset_losses[i, :, 0] path_matrix[i, :, :] = np.argmin(transition_loss, axis=1) loss_matrix[i, :, 0] = transition_loss[ np.arange(d), path_matrix[i, :, 0].astype(int), 0] loss_matrix[i, :, 1] = transition_loss[ np.arange(d), path_matrix[i, :, 1].astype(int), 1] loss_matrix[i, :, :] += frame_losses[i, :, :] pianoroll = np.zeros([n, d], dtype=bool) pianoroll[n - 1, :] = np.argmin(loss_matrix[n - 1, :, :], axis=-1) for i in range(n - 2, -1, -1): pianoroll[i, :] = path_matrix[ i + 1, np.arange(d), pi
[NumPy] Remove references to deprecated NumPy type aliases. This change replaces references to a number of deprecated NumPy type aliases (np.bool, np.int, np.float, np.complex, np.object, np.str) with their recommended replacement (bool, int, float, complex, object, str). NumPy 1.24 drops the deprecated aliases, so we must remove uses before updating NumPy. PiperOrigin-RevId: 497026048
probs_to_pianoroll_viterbi
52828dc160781f422e670d414406ffe91c30066b
magenta
infer_util.py
14
28
https://github.com/magenta/magenta.git
3
454
0
75
649
Python
{ "docstring": "Viterbi decoding of frame & onset probabilities to pianoroll.\n\n Args:\n frame_probs: A numpy array (num-frames-by-num-pitches) of frame\n probabilities.\n onset_probs: A numpy array (num-frames-by-num-pitches) of onset\n probabilities.\n alpha: Relative weight of onset and frame loss, a float between 0 and 1.\n With alpha = 0, onset probabilities will be ignored. With alpha = 1, frame\n probabilities will be ignored.\n\n Returns:\n A numpy array (num-frames-by-num-pitches) representing the boolean-valued\n pianoroll.\n ", "language": "en", "n_whitespaces": 105, "n_words": 67, "vocab_size": 39 }
def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5): n, d = onset_probs.shape loss_matrix = np.zeros([n, d, 2], dtype=float) path_matrix = np.zeros([n, d, 2], dtype=bool) frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs, frame_probs], axis=-1)) onset_losses = alpha * -np.log(np.stack([1 - onset_probs, onset_probs], axis=-1)) loss_matrix[0, :, :] = frame_losses[0, :, :] + onset_losses[0, :, :] for i in range(1, n): transition_loss = np.tile(loss_matrix[i - 1, :, :][:, :, np.newaxis], [1, 1, 2]) transition_loss[:, 0, 0] += onset_losses[i, :, 0] transition_loss[:, 0, 1] += onset_losses[i, :, 1] transition_loss[:, 1, 0] += onset_losses[i, :, 0] transition_loss[:, 1, 1] += onset_losses[i, :, 0] path_matrix[i, :, :] = np.argmin(transition_loss, axis=1) loss_matrix[i, :, 0] = transition_loss[ np.arange(d), path_matrix[i, :, 0].astype(int), 0] loss_matrix[i, :, 1] = transition_loss[ np.arange(d), path_matrix[i, :, 1].astype(int), 1] loss_matrix[i, :, :] += frame_losses[i, :, :] pianoroll = np.zeros([n, d], dtype=bool) pianoroll[n - 1, :] = np.argmin(loss_matrix[n - 1, :, :], axis=-1) for i in range(n - 2, -1, -1): pianoroll[i, :] = path_matrix[ i + 1, np.arange(d), pianoroll[i + 1, :].astype(int)] return pianoroll
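Note: a minimal usage sketch for the Viterbi decoder above. It assumes probs_to_pianoroll_viterbi is in scope (for example imported from magenta.models.onsets_frames_transcription.infer_util); the shapes and alpha value are illustrative.

import numpy as np

rng = np.random.default_rng(0)
n_frames, n_pitches = 100, 88                        # e.g. 88 piano keys
frame_probs = rng.uniform(size=(n_frames, n_pitches))
onset_probs = rng.uniform(size=(n_frames, n_pitches))

# alpha=0.5 weighs onset and frame losses equally.
pianoroll = probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5)
print(pianoroll.shape, pianoroll.dtype)              # (100, 88) bool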
4,129
22,040
41
pipenv/patched/pip/_vendor/requests/_internal_utils.py
15
7
def to_native_string(string, encoding="ascii"): if isinstance(string, builtin_str): out = string else: out = string.decode(encodin
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
to_native_string
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
_internal_utils.py
11
6
https://github.com/pypa/pipenv.git
2
33
0
12
57
Python
{ "docstring": "Given a string object, regardless of type, returns a representation of\n that string in the native string type, encoding and decoding where\n necessary. This assumes ASCII unless told otherwise.\n ", "language": "en", "n_whitespaces": 38, "n_words": 29, "vocab_size": 24 }
def to_native_string(string, encoding="ascii"): if isinstance(string, builtin_str): out = string else: out = string.decode(encoding) return out
48,365
197,177
86
sympy/parsing/mathematica.py
20
11
def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations
Adapt to new deprecation policy
mathematica
cddb6451ed54ab1f84cffb5313cbff709bbaf8e5
sympy
mathematica.py
11
11
https://github.com/sympy/sympy.git
2
52
0
19
88
Python
{ "docstring": "\n Translate a string containing a Wolfram Mathematica expression to a SymPy\n expression.\n\n If the translator is unable to find a suitable SymPy expression, the\n ``FullForm`` of the Mathematica expression will be output, using SymPy\n ``Function`` objects as nodes of the syntax tree.\n\n Examples\n ========\n\n >>> from sympy.parsing.mathematica import mathematica\n >>> mathematica(\"Sin[x]^2 Tan[y]\")\n sin(x)**2*tan(y)\n >>> e = mathematica(\"F[7,5,3]\")\n >>> e\n F(7, 5, 3)\n >>> from sympy import Function, Max, Min\n >>> e.replace(Function(\"F\"), lambda *x: Max(*x)*Min(*x))\n 21\n\n Both standard input form and Mathematica full form are supported:\n\n >>> mathematica(\"x*(a + b)\")\n x*(a + b)\n >>> mathematica(\"Times[x, Plus[a, b]]\")\n x*(a + b)\n\n To get a matrix from Wolfram's code:\n\n >>> m = mathematica(\"{{a, b}, {c, d}}\")\n >>> m\n ((a, b), (c, d))\n >>> from sympy import Matrix\n >>> Matrix(m)\n Matrix([\n [a, b],\n [c, d]])\n\n If the translation into equivalent SymPy expressions fails, an SymPy\n expression equivalent to Wolfram Mathematica's \"FullForm\" will be created:\n\n >>> mathematica(\"x_.\")\n Optional(Pattern(x, Blank()))\n >>> mathematica(\"Plus @@ {x, y, z}\")\n Apply(Plus, (x, y, z))\n >>> mathematica(\"f[x_, 3] := x^3 /; x > 0\")\n SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0))\n The ``additional_translations`` parameter for the Mathematica parser is now deprecated.\nUse SymPy's .replace( ) or .subs( ) methods on the output expression instead.", "language": "en", "n_whitespaces": 319, "n_words": 203, "vocab_size": 142 }
def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: sympy_deprecation_warning( , deprecated_since_version="1.11", active_deprecations_target="mathematica-parser-additional-translations", ) return sympify(parser._parse_old(s)) return parser.parse(s)
47,072
194,779
635
parlai/scripts/generate_model_card.py
262
38
def evaluation(self): # adding info about the eval tasks if self.eval_tasks == self.train_tasks: msg = "For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information" eval_list = '' else: msg = f"This model was evaluated on the datasets below (use the `parlai display_data` commands to show data). Visit the {make_link('task (dataset) list', task_site)} for more details about the datasets.\n" eval_list = get_dataset_info(self.eval_tasks) eval_list = '\n' + '\n'.join(eval_list) content = [msg + eval_list] # validation metric info: getting metric name and description splitted = re.sub(r'_+', ' ', self.valid_metric).split() key = splitted[-1] if extra_metric_info.get(key): mname, description = extra_metric_info[key] elif METRICS_DISPLAY_DATA.get(key): mname = METRICS_DISPLAY_DATA[key].title description = METRICS_DISPLAY_DATA[key].description else: description, mname = (None, None) # adding description for validation metric and re-wording it: msg = f"\n\nWe used the metric {metric_format(self.valid_metric)}" if len(splitted) == 3 and splitted[0] == 'class' and mname: msg += f", the {mname.lower()} scores for the class {splitted[1]}" content.append(msg + ' as the validation metric. ') if description: description = description[0].lower() + description[1:] content[-1] += f"Recall that `{self.valid_metric}` is {description}." # evaluation table # getting list of subtasks and making columns eval_tasks = self.eval_tasks if len(self.eval_tasks) > 1:
autoformat (#4378)
evaluation
81f722d29045a7a5841d0931a082ded1d1f13863
ParlAI
generate_model_card.py
14
36
https://github.com/facebookresearch/ParlAI.git
14
318
0
159
605
Python
{ "docstring": "\n returns a section with dataset info about the eval tasks if they exist,\n information about the validation metric if it exists, and create a table with\n the validation metric.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 22 }
def evaluation(self): # adding info about the eval tasks if self.eval_tasks == self.train_tasks: msg = "For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information" eval_list = '' else: msg = f"This model was evaluated on the datasets below (use the `parlai display_data` commands to show data). Visit the {make_link('task (dataset) list', task_site)} for more details about the datasets.\n" eval_list = get_dataset_info(self.eval_tasks) eval_list = '\n' + '\n'.join(eval_list) content = [msg + eval_list] # validation metric info: getting metric name and description splitted = re.sub(r'_+', ' ', self.valid_metric).split() key = splitted[-1] if extra_metric_info.get(key): mname, description = extra_metric_info[key] elif METRICS_DISPLAY_DATA.get(key): mname = METRICS_DISPLAY_DATA[key].title description = METRICS_DISPLAY_DATA[key].description else: description, mname = (None, None) # adding description for validation metric and re-wording it: msg = f"\n\nWe used the metric {metric_format(self.valid_metric)}" if len(splitted) == 3 and splitted[0] == 'class' and mname: msg += f", the {mname.lower()} scores for the class {splitted[1]}" content.append(msg + ' as the validation metric. ') if description: description = description[0].lower() + description[1:] content[-1] += f"Recall that `{self.valid_metric}` is {description}." # evaluation table # getting list of subtasks and making columns eval_tasks = self.eval_tasks if len(self.eval_tasks) > 1: eval_tasks.insert(0, 'All') columns = [' '] + [taskname(subtask) for subtask in eval_tasks] # only one row: validation row = [metric_format(self.valid_metric)] for subtask in eval_tasks: # creating the key to get metric and formatting pre = '' if subtask == 'All' or len(eval_tasks) == 1 else subtask + '/' key = pre + self.valid_metric fmt = '{:.4f}' if self.valid_metric in not_percent else '{:.2%}' row.append(fmt.format(self.eval_results[key])) return '\n'.join(content) + '\n\n' + '\n'.join(make_md_table([row], columns))
@tf_test_utils.with_eager_op_as_function
80,977
272,207
284
keras/integration_test/gradient_checkpoint_test.py
110
42
def _train_with_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) # This model is the same model as _get_big_cnn_model but split into 3 parts. models = _get_split_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) model1, model2, model3 = models # Appl
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_train_with_recompute
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
gradient_checkpoint_test.py
13
28
https://github.com/keras-team/keras.git
2
176
1
82
288
Python
{ "docstring": "Trains a single large model with gradient checkpointing using tf.recompute_grad.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def _train_with_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) # This model is the same model as _get_big_cnn_model but split into 3 parts. models = _get_split_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) model1, model2, model3 = models # Apply gradient checkpointing to the submodels using tf.recompute_grad. model1_re = tf.recompute_grad(model1) model2_re = tf.recompute_grad(model2) model3_re = tf.recompute_grad(model3) optimizer = optimizers.SGD() tr_vars = ( model1.trainable_variables + model2.trainable_variables + model3.trainable_variables ) losses = [] for _ in range(n_steps): with tf.GradientTape() as tape: logits1 = model1_re(x) logits2 = model2_re(logits1) logits3 = model3_re(logits2) loss = _compute_loss(logits3, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses @tf_test_utils.with_eager_op_as_function
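Note: a minimal sketch of the same gradient-checkpointing idea, wrapping a small Keras block with tf.recompute_grad so its activations are recomputed during backprop rather than stored. The layer sizes and input shape are arbitrary.

import tensorflow as tf

block = tf.keras.Sequential(
    [tf.keras.layers.Dense(128, activation="relu") for _ in range(4)]
)
block.build((None, 64))                  # create the variables up front
block_re = tf.recompute_grad(block)      # checkpointed view of the same block

x = tf.random.normal([32, 64])
with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(block_re(x)))
grads = tape.gradient(loss, block.trainable_variables)
print([g.shape.as_list() for g in grads])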
30,200
134,124
100
python/ray/tune/tests/test_syncer_callback.py
45
25
def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs): caplog.set_level(logging.ERROR, logger="ray.tune.syncer") tmp_source, tmp_target = temp_data_dirs syncer_callback = TestSyncerCallback( sync_period=0, local_logdir_override=tmp_target, ) trial1 = MockTrial(trial_id="a", logdir=tmp_source, on_dead_node=True) syncer_callback.on_trial_result(iteration=1, trials=[], trial=trial1, result={}) assert ( "An erro
[Tune] Catch SyncerCallback failure with dead node (#29438) ### Context This issue was uncovered by this long running test: `long_running_distributed_pytorch_pbt_failure`. This test randomly kills nodes via `FailureInjectorCallback`, and the test failure happens when: 1. A trial result comes in and is processed 2. The node this trial is running on is requested to be killed by the failure injector 3. The driver's syncer callback runs on the on_trial_result event 4. The node dies 5. The driver is in the middle of syncing, trying to access the node ip, which errors ### What's in this PR? 1. Gracefully handle this race condition by catching the error thrown by the sync operation on a dead node 2. Log an error to the user 3. Adds a test for this sync with dead node scenario Signed-off-by: Justin Yu <[email protected]>
test_syncer_callback_dead_node_log_error
fc9f8e458c4dad7a51e0d781917b1a003cb55cd7
ray
test_syncer_callback.py
10
13
https://github.com/ray-project/ray.git
1
86
0
42
135
Python
{ "docstring": "Check that we catch + log errors when trying syncing with a dead remote node.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs): caplog.set_level(logging.ERROR, logger="ray.tune.syncer") tmp_source, tmp_target = temp_data_dirs syncer_callback = TestSyncerCallback( sync_period=0, local_logdir_override=tmp_target, ) trial1 = MockTrial(trial_id="a", logdir=tmp_source, on_dead_node=True) syncer_callback.on_trial_result(iteration=1, trials=[], trial=trial1, result={}) assert ( "An error occurred when trying to get the node ip where this trial is running" in caplog.text )
42,354
177,335
81
networkx/linalg/modularitymatrix.py
44
18
def directed_modularity_matrix(G, nodelist=None, weight=None): import numpy as np if nodelist is None: nodelist = list(G)
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <[email protected]> Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Stefan van der Walt <[email protected]>
directed_modularity_matrix
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
networkx
modularitymatrix.py
10
10
https://github.com/networkx/networkx.git
2
92
0
35
147
Python
{ "docstring": "Returns the directed modularity matrix of G.\n\n The modularity matrix is the matrix B = A - <A>, where A is the adjacency\n matrix and <A> is the expected adjacency matrix, assuming that the graph\n is described by the configuration model.\n\n More specifically, the element B_ij of B is defined as\n\n .. math::\n B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m\n\n where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree\n of node j, with m the number of edges in the graph. When weight is set\n to a name of an attribute edge, Aij, k_i, k_j and m are computed using\n its value.\n\n Parameters\n ----------\n G : DiGraph\n A NetworkX DiGraph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default=None)\n The edge attribute that holds the numerical value used for\n the edge weight. If None then all edge weights are 1.\n\n Returns\n -------\n B : Numpy array\n The modularity matrix of G.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_edges_from(\n ... (\n ... (1, 2),\n ... (1, 3),\n ... (3, 1),\n ... (3, 2),\n ... (3, 5),\n ... (4, 5),\n ... (4, 6),\n ... (5, 4),\n ... (5, 6),\n ... (6, 4),\n ... )\n ... )\n >>> B = nx.directed_modularity_matrix(G)\n\n\n Notes\n -----\n NetworkX defines the element A_ij of the adjacency matrix as 1 if there\n is a link going from node i to node j. Leicht and Newman use the opposite\n definition. This explains the different expression for B_ij.\n\n See Also\n --------\n to_numpy_array\n modularity_spectrum\n adjacency_matrix\n modularity_matrix\n\n References\n ----------\n .. [1] E. A. Leicht, M. E. J. Newman,\n \"Community structure in directed networks\",\n Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.\n ", "language": "en", "n_whitespaces": 598, "n_words": 303, "vocab_size": 177 }
def directed_modularity_matrix(G, nodelist=None, weight=None): import numpy as np if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") k_in = A.sum(axis=0) k_out = A.sum(axis=1) m = k_in.sum() # Expected adjacency matrix X = np.outer(k_out, k_in) / m return A - X
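Note: a plain-NumPy check of the formula in the docstring above, B = A - outer(k_out, k_in) / m, on a 3-node toy digraph (no networkx involved).

import numpy as np

# adjacency of a toy digraph: 1 -> 2, 1 -> 3, 3 -> 2
A = np.array([[0, 1, 1],
              [0, 0, 0],
              [0, 1, 0]], dtype=float)
k_out = A.sum(axis=1)              # out-degrees
k_in = A.sum(axis=0)               # in-degrees
m = A.sum()                        # number of edges
B = A - np.outer(k_out, k_in) / m
print(B)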
36,452
155,706
141
dask/dataframe/io/parquet/core.py
24
9
def project_columns(self, columns): if columns == se
Use map_partitions (Blockwise) in to_parquet (#8487)
project_columns
d98c1dd63e0d7f6a003e3ff70eca796c19b81d42
dask
core.py
8
12
https://github.com/dask/dask.git
2
45
0
23
66
Python
{ "docstring": "Return a new ParquetFunctionWrapper object\n with a sub-column projection.\n ", "language": "en", "n_whitespaces": 23, "n_words": 9, "vocab_size": 8 }
def project_columns(self, columns): if columns == self.columns: return self return ParquetFunctionWrapper( self.engine, self.fs, self.meta, columns, self.index, None, # Already merged into common_kwargs self.common_kwargs, )
48,818
198,198
193
sympy/tensor/array/expressions/array_expressions.py
61
28
def sort_args_by_name(self): expr = self.expr if not isinstance(expr, ArrayTensorProduct): return self arg
Rename files for array expression conversions in order to avoid naming conflicts in TAB-completion of the corresponding functions
sort_args_by_name
a69c49bec6caf2cb460dc4eedf0fec184db92f0e
sympy
array_expressions.py
13
16
https://github.com/sympy/sympy.git
5
135
0
46
211
Python
{ "docstring": "\n Sort arguments in the tensor product so that their order is lexicographical.\n\n Examples\n ========\n\n >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array\n >>> from sympy import MatrixSymbol\n >>> from sympy.abc import N\n >>> A = MatrixSymbol(\"A\", N, N)\n >>> B = MatrixSymbol(\"B\", N, N)\n >>> C = MatrixSymbol(\"C\", N, N)\n >>> D = MatrixSymbol(\"D\", N, N)\n\n >>> cg = convert_matrix_to_array(C*D*A*B)\n >>> cg\n ArrayContraction(ArrayTensorProduct(A, D, C, B), (0, 3), (1, 6), (2, 5))\n >>> cg.sort_args_by_name()\n ArrayContraction(ArrayTensorProduct(A, D, B, C), (0, 3), (1, 4), (2, 7))\n ", "language": "en", "n_whitespaces": 194, "n_words": 81, "vocab_size": 51 }
def sort_args_by_name(self): expr = self.expr if not isinstance(expr, ArrayTensorProduct): return self args = expr.args sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1])) pos_sorted, args_sorted = zip(*sorted_data) reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)} contraction_tuples = self._get_contraction_tuples() contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples] c_tp = _array_tensor_product(*args_sorted) new_contr_indices = self._contraction_tuples_to_contraction_indices( c_tp, contraction_tuples ) return _array_contraction(c_tp, *new_contr_indices)
29,944
133,158
93
python/ray/util/iter.py
36
6
def gather_async(self, batch_ms=0, num_async=1) -> "LocalIterator[T]": if num_async < 1: raise ValueError("queue depth must be positive") if batch_ms < 0:
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
gather_async
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
iter.py
10
33
https://github.com/ray-project/ray.git
3
61
0
30
70
Python
{ "docstring": "Returns a local iterable for asynchronous iteration.\n\n New items will be fetched from the shards asynchronously as soon as\n the previous one is computed. Items arrive in non-deterministic order.\n\n Arguments:\n batch_ms (int): Batches items for batch_ms milliseconds\n on each shard before retrieving it.\n Increasing batch_ms increases latency but improves throughput.\n If this value is 0, then items are returned immediately.\n num_async (int): The max number of async requests in flight\n per actor. Increasing this improves the amount of pipeline\n parallelism in the iterator.\n\n Examples:\n >>> it = from_range(100, 1).gather_async()\n >>> next(it)\n ... 3\n >>> next(it)\n ... 0\n >>> next(it)\n ... 1\n ", "language": "en", "n_whitespaces": 310, "n_words": 101, "vocab_size": 77 }
def gather_async(self, batch_ms=0, num_async=1) -> "LocalIterator[T]": if num_async < 1: raise ValueError("queue depth must be positive") if batch_ms < 0: raise ValueError("batch time must be positive") # Forward reference to the returned iterator. local_iter = None
33,585
146,010
87
python/ray/ml/tests/test_checkpoints.py
24
13
def test_dict_checkpoint_dict(self): checkpoint = self._prepare_dict_checkpoint() # Convert into dict checkpoint data_dict = checkpoint.to_dict() self.assertIsInstance(data_dict, dict) # Create from dict checkpoint = Checkpoint.from_dict(da
[ml] Add Ray ML / AIR checkpoint implementation (#22691) This PR splits up the changes in #22393 and introduces an implementation of the ML Checkpoint interface used by Ray Tune. This means, the TuneCheckpoint class implements the to/from_[bytes|dict|directory|object_ref|uri] conversion functions, as well as more high-level functions to transition between the different TuneCheckpoint classes. It also includes test cases for Tune's main conversion modes, i.e. dict - intermediate - dict and fs - intermediate - fs. These changes will be the basis for refactoring the tune interface to use TuneCheckpoint objects instead of TrialCheckpoints (externally) and instead of paths/objects (internally).
test_dict_checkpoint_dict
b267be475863a66e9feedb2be5f0a30a2ed8c493
ray
test_checkpoints.py
8
7
https://github.com/ray-project/ray.git
1
50
0
18
87
Python
{ "docstring": "Test conversion from dict to dict checkpoint and back.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def test_dict_checkpoint_dict(self): checkpoint = self._prepare_dict_checkpoint() # Convert into dict checkpoint data_dict = checkpoint.to_dict() self.assertIsInstance(data_dict, dict) # Create from dict checkpoint = Checkpoint.from_dict(data_dict) self.assertTrue(checkpoint._data_dict) self._assert_dict_checkpoint(checkpoint)
25,918
117,189
196
tests/integration_tests/flows/test_company_independent.py
29
18
def test_5_model(self): query = predict_query = for
Projects structure (#3532) Projects structure
test_5_model
7c02e15aa403a4ca1fa34489dd2df9136d6c961c
mindsdb
test_company_independent.py
13
23
https://github.com/mindsdb/mindsdb.git
2
90
0
24
142
Python
{ "docstring": "\n CREATE MODEL mindsdb.model_{}\n FROM test_integration_{} (\n select * from test_data.home_rentals limit 50\n ) PREDICT rental_price\n USING join_learn_process=true, time_aim=5\n \n select * from mindsdb.model_{} where sqft = 100\n ", "language": "en", "n_whitespaces": 112, "n_words": 26, "vocab_size": 22 }
def test_5_model(self): query = predict_query = for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: self.sql_via_http( query.format(char, char), company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) response = self.sql_via_http( predict_query.format(char), company_id=cid, expected_resp_type=RESPONSE_TYPE.TABLE ) self.assertTrue(len(response['data']), 1)
69,840
242,342
182
src/PIL/Image.py
59
12
def putpalette(self, data, rawmode="RGB"): from . import ImagePalette if self.mode not in ("L", "LA", "P", "PA"): raise ValueError("illegal image mode") if isinstance(data, ImagePalette.ImagePalette): palette = ImagePalette.raw(data.rawmode, data.palette) else: if not isinstance(data, bytes): data = bytes(data) palette = ImagePalette.raw(rawmode, data) self.mode = "PA" if "A" in self.mode else "P" self.palette = palette self.palette.mode =
Attach RGBA palettes from putpalette() when suitable
putpalette
9cdb0508b6cbd3a3061017760a5eab4d13c3924a
Pillow
Image.py
13
14
https://github.com/python-pillow/Pillow.git
5
118
0
43
204
Python
{ "docstring": "\n Attaches a palette to this image. The image must be a \"P\", \"PA\", \"L\"\n or \"LA\" image.\n\n The palette sequence must contain at most 256 colors, made up of one\n integer value for each channel in the raw mode.\n For example, if the raw mode is \"RGB\", then it can contain at most 768\n values, made up of red, green and blue values for the corresponding pixel\n index in the 256 colors.\n If the raw mode is \"RGBA\", then it can contain at most 1024 values,\n containing red, green, blue and alpha values.\n\n Alternatively, an 8-bit string may be used instead of an integer sequence.\n\n :param data: A palette sequence (either a list or a string).\n :param rawmode: The raw mode of the palette. Either \"RGB\", \"RGBA\", or a mode\n that can be transformed to \"RGB\" or \"RGBA\" (e.g. \"R\", \"BGR;15\", \"RGBA;L\").\n ", "language": "en", "n_whitespaces": 245, "n_words": 142, "vocab_size": 86 }
def putpalette(self, data, rawmode="RGB"): from . import ImagePalette if self.mode not in ("L", "LA", "P", "PA"): raise ValueError("illegal image mode") if isinstance(data, ImagePalette.ImagePalette): palette = ImagePalette.raw(data.rawmode, data.palette) else: if not isinstance(data, bytes): data = bytes(data) palette = ImagePalette.raw(rawmode, data) self.mode = "PA" if "A" in self.mode else "P" self.palette = palette self.palette.mode = "RGB" self.load() # install new palette
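Note: a minimal usage sketch for Image.putpalette, attaching a three-colour RGB palette to a small "P" image; the colours and image size are arbitrary.

from PIL import Image

img = Image.new("P", (4, 4))      # pixel values are palette indices
img.putpalette([
    255, 0, 0,                    # index 0 -> red
    0, 255, 0,                    # index 1 -> green
    0, 0, 255,                    # index 2 -> blue
])
img.putpixel((0, 0), 2)           # paint one pixel with palette index 2
print(img.convert("RGB").getpixel((0, 0)))  # (0, 0, 255)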
79,329
268,055
41
test/lib/ansible_test/_internal/timeout.py
19
14
def get_timeout() -> t.Optional[t.Dict[str, t.Any]]: if not os.path.exists(TIMEOUT_PATH): return None data = read_json_file(TIMEOUT_PATH) data['deadline'] = datetime.datetime.strptime(data['deadline']
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
get_timeout
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
timeout.py
10
7
https://github.com/ansible/ansible.git
2
60
0
16
100
Python
{ "docstring": "Return details about the currently set timeout, if any, otherwise return None.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def get_timeout() -> t.Optional[t.Dict[str, t.Any]]: if not os.path.exists(TIMEOUT_PATH): return None data = read_json_file(TIMEOUT_PATH) data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ') return data
@test_utils.run_v2_only
83,360
280,504
15
keras/saving/experimental/saving_lib_test.py
11
12
def my_mean_squared_error(y_true, y_pred): return backend.mean(tf.math.squared_dif
Move new optimizer out of optimizer_experimental/ directory. PiperOrigin-RevId: 488998585
my_mean_squared_error
5a105aadbdc6fde2c2529280c4789864adbb81c7
keras
saving_lib_test.py
10
2
https://github.com/keras-team/keras.git
1
29
1
11
58
Python
{ "docstring": "Identical to built-in `mean_squared_error`, added here as a custom\n\n func.\n ", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
def my_mean_squared_error(y_true, y_pred): return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1) module_my_mean_squared_error = my_mean_squared_error @test_utils.run_v2_only
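Note: a quick numeric check of the custom loss above (per-sample mean squared error) using plain TensorFlow ops; the inputs are made up.

import tensorflow as tf

y_true = tf.constant([[0.0, 1.0], [1.0, 1.0]])
y_pred = tf.constant([[0.5, 1.0], [0.0, 1.0]])
mse = tf.reduce_mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
print(mse.numpy())  # [0.125 0.5  ]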
19,916
100,438
278
lib/model/session.py
80
20
def _amd_predict_with_optimized_batchsizes(self, feed, batch_size): if isinstance(feed, np.ndarray): feed = [feed] items = feed[0].shape[0] done_items = 0 results = [] while done_items < items: if batch_size < 4: # Not much difference in BS < 4 batch_size = 1 batch_items = ((items - done_items) // batch_size) * batch_size if batch_items: pred_data = [x[done_items:done_items + batch_items] for x in feed] pred = self._model.predict(pred_data, batch_size=batch_size) done_items += batch_items results.append(pred) batch_size //= 2 if isinstance(results[0], np.ndarray): return np.concatenate(results) return
Update all Keras Imports to be conditional (#1214) * Remove custom keras importer * first round keras imports fix * launcher.py: Remove KerasFinder references * 2nd round keras imports update (lib and extract) * 3rd round keras imports update (train) * remove KerasFinder from tests * 4th round keras imports update (tests)
_amd_predict_with_optimized_batchsizes
aa39234538a8f83e6aa2b60b8275a570e8876ac2
faceswap
session.py
14
19
https://github.com/deepfakes/faceswap.git
8
146
0
56
235
Python
{ "docstring": " Minimizes the amount of kernels to be compiled when using the ``amd`` backend with\n varying batch sizes while trying to keep the batchsize as high as possible.\n\n Parameters\n ----------\n feed: numpy.ndarray or list\n The feed to be provided to the model as input. This should be a ``numpy.ndarray``\n for single inputs or a ``list`` of ``numpy.ndarray`` objects for multiple inputs.\n batch_size: int\n The upper batchsize to use.\n ", "language": "en", "n_whitespaces": 143, "n_words": 67, "vocab_size": 49 }
def _amd_predict_with_optimized_batchsizes(self, feed, batch_size): if isinstance(feed, np.ndarray): feed = [feed] items = feed[0].shape[0] done_items = 0 results = [] while done_items < items: if batch_size < 4: # Not much difference in BS < 4 batch_size = 1 batch_items = ((items - done_items) // batch_size) * batch_size if batch_items: pred_data = [x[done_items:done_items + batch_items] for x in feed] pred = self._model.predict(pred_data, batch_size=batch_size) done_items += batch_items results.append(pred) batch_size //= 2 if isinstance(results[0], np.ndarray): return np.concatenate(results) return [np.concatenate(x) for x in zip(*results)]
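Note: a standalone sketch of the batch-splitting logic above, separated from the model call — process as many items as possible at the current batch size, then halve the batch size for the remainder. plan_batches is a hypothetical helper used only to show the chunking.

def plan_batches(n_items: int, batch_size: int):
    done, plan = 0, []
    while done < n_items:
        if batch_size < 4:        # little gain below a batch size of 4
            batch_size = 1
        chunk = ((n_items - done) // batch_size) * batch_size
        if chunk:
            plan.append((batch_size, chunk))
            done += chunk
        batch_size //= 2
    return plan

print(plan_batches(19, 8))  # [(8, 16), (1, 3)]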
24,044
110,304
73
lib/matplotlib/patches.py
23
6
def set_positions(self, posA, posB): if posA is not None:
Doc: Fix grammar and spelling
set_positions
03a0b5ea238014ba87f74ef766928287726aa00a
matplotlib
patches.py
10
6
https://github.com/matplotlib/matplotlib.git
3
43
0
15
67
Python
{ "docstring": "\n Set the start and end positions of the connecting path.\n\n Parameters\n ----------\n posA, posB : None, tuple\n (x, y) coordinates of arrow tail and arrow head respectively. If\n `None` use current value.\n ", "language": "en", "n_whitespaces": 90, "n_words": 32, "vocab_size": 28 }
def set_positions(self, posA, posB): if posA is not None: self._posA_posB[0] = posA if posB is not None: self._posA_posB[1] = posB self.stale = True
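Note: a minimal usage sketch of set_positions on a FancyArrowPatch, retargeting an existing arrow by updating its tail and head; the coordinates are arbitrary data values.

import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch

fig, ax = plt.subplots()
arrow = FancyArrowPatch((0.1, 0.1), (0.4, 0.4), arrowstyle="->", mutation_scale=15)
ax.add_patch(arrow)

arrow.set_positions((0.2, 0.2), (0.8, 0.8))   # move tail and head
plt.show()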
18,130
86,579
557
tests/sentry/api/endpoints/test_organization_metric_data.py
101
34
def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self): for tag, value, numbers in ( ("transaction", "/foo/", [10, 11, 12]), ("transaction", "/bar/", [4, 5, 6]), ): for subvalue in numbers: self.store_performance_metric( name=TransactionMRI.MEASUREMENTS_LCP.value, tags={tag: value}, value=subvalue, ) response = self.get_success_response( self.organization.slug, field=[ f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", f"count_unique({TransactionMetricKey.USER.value})", ], statsPeriod="1h", interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", useCase="performance", ) groups = response.data["groups"] assert len(groups) == 2 expected = [ ("/bar/", 5.
feat(metrics): Standardize tests and fix overall flakiness [TET-437] (#39660)
test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data
c67c560f667e6fc7fee2c6d62ac3987ba54f89d5
sentry
test_organization_metric_data.py
14
39
https://github.com/getsentry/sentry.git
4
239
0
82
431
Python
{ "docstring": "\n Test that ensures when transactions table has null values for some fields (i.e. fields\n with a different entity than the entity of the field in the order by), then the table gets\n populated accordingly\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 28 }
def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self): for tag, value, numbers in ( ("transaction", "/foo/", [10, 11, 12]), ("transaction", "/bar/", [4, 5, 6]), ): for subvalue in numbers: self.store_performance_metric( name=TransactionMRI.MEASUREMENTS_LCP.value, tags={tag: value}, value=subvalue, ) response = self.get_success_response( self.organization.slug, field=[ f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", f"count_unique({TransactionMetricKey.USER.value})", ], statsPeriod="1h", interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", useCase="performance", ) groups = response.data["groups"] assert len(groups) == 2 expected = [ ("/bar/", 5.0, 5), ("/foo/", 11.0, 1), ] for (expected_tag_value, expected_lcp_count, users), group in zip(expected, groups): # With orderBy, you only get totals: assert group["by"] == {"transaction": expected_tag_value, "project_id": self.project.id} assert group["totals"] == { f"count_unique({TransactionMetricKey.USER.value})": 0, f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})": expected_lcp_count, } assert group["series"] == { f"count_unique({TransactionMetricKey.USER.value})": [0], f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})": [expected_lcp_count], }
14,369
66,873
13
erpnext/payroll/doctype/employee_benefit_application/employee_benefit_application.py
21
10
def get_earning_components_max_benefits(employee, date, earning_component):
style: format code with black
get_earning_components_max_benefits
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
employee_benefit_application.py
9
14
https://github.com/frappe/erpnext.git
2
38
0
18
56
Python
{ "docstring": "\n\t\t\tselect amount\n\t\t\tfrom `tabSalary Detail`\n\t\t\twhere parent = %s and is_flexible_benefit = 1\n\t\t\tand salary_component = %s\n\t\t\torder by name\n\t\t", "language": "en", "n_whitespaces": 15, "n_words": 20, "vocab_size": 16 }
def get_earning_components_max_benefits(employee, date, earning_component): salary_structure = get_assigned_salary_structure(employee, date) amount = frappe.db.sql( , salary_structure, earning_component, ) return amount if amount else 0
80,549
270,731
72
keras/engine/base_layer.py
22
9
def get_input_mask_at(self, node_index): inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, "_keras_mask", None) for x in inputs] else:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
get_input_mask_at
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
base_layer.py
11
6
https://github.com/keras-team/keras.git
3
50
0
19
80
Python
{ "docstring": "Retrieves the input mask tensor(s) of a layer at a given node.\n\n Args:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n ", "language": "en", "n_whitespaces": 150, "n_words": 51, "vocab_size": 39 }
def get_input_mask_at(self, node_index): inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, "_keras_mask", None) for x in inputs] else: return getattr(inputs, "_keras_mask", None)
1,726
9,844
52
jina/peapods/networking.py
9
1
def get_default_grpc_options(): retu
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
get_default_grpc_options
933415bfa1f9eb89f935037014dfed816eb9815d
jina
networking.py
8
5
https://github.com/jina-ai/jina.git
1
22
0
8
39
Python
{ "docstring": "\n Returns a list of default options used for creating grpc channels.\n Documentation is here https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h\n :returns: list of tuples defining grpc parameters\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 19 }
def get_default_grpc_options(): return [ ('grpc.max_send_message_length', -1), ('grpc.max_receive_message_length', -1), ]
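A brief usage sketch for the helper above: the returned tuples are the standard `options` sequence accepted by gRPC channel constructors. The target address below is a placeholder and not taken from the record; the helper itself is assumed to be in scope.

```python
import grpc

# Unlimited send/receive message sizes, as returned by the helper above.
options = get_default_grpc_options()

# "localhost:54321" is a hypothetical target served by some gRPC server.
channel = grpc.insecure_channel("localhost:54321", options=options)
channel.close()
```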
5,634
30,582
43
src/ocrmypdf/builtin_plugins/concurrency.py
11
6
def _cancel_futures_kwargs(self): if sys.version_info[:2] < (3, 9): return {} return dict(cancel_futures=Tru
Add shim for cancel_futures in older Pythons Thanks @hfwittmann Closes #993 Co-authored-by: H. Felix Wittmann <[email protected]>
_cancel_futures_kwargs
6b425aaebe33703bd44b1b15571e4af8533b851a
OCRmyPDF
concurrency.py
8
4
https://github.com/ocrmypdf/OCRmyPDF.git
2
31
0
10
51
Python
{ "docstring": "Shim older Pythons that do not have Executor.shutdown(...cancel_futures=).\n\n Remove this code when support for Python 3.8 is dropped.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 18 }
def _cancel_futures_kwargs(self): if sys.version_info[:2] < (3, 9): return {} return dict(cancel_futures=True)
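As a rough illustration of how such a shim is typically consumed (the executor setup below is an assumption, not part of the record), the returned kwargs can be splatted into `Executor.shutdown` so the same call works on Python 3.8 and on 3.9+:

```python
import sys
from concurrent.futures import ThreadPoolExecutor


def _cancel_futures_kwargs():
    # Same shim as above, reproduced as a free function for the example.
    if sys.version_info[:2] < (3, 9):
        return {}
    return dict(cancel_futures=True)


executor = ThreadPoolExecutor(max_workers=2)
futures = [executor.submit(pow, 2, n) for n in range(10)]

# On 3.9+ this cancels still-pending futures; on 3.8 it degrades to a plain shutdown.
executor.shutdown(wait=True, **_cancel_futures_kwargs())
```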
27,734
124,984
18
python/ray/data/_internal/util.py
9
7
def _estimate_available_parallelism() -> int: cur_pg = ray.util.get_current_placement_
[data] Add warnings when DatasetPipelines are under-parallelized or using too much memory (#26592) Currently, it's not very easy to figure out why a DatasetPipeline may be underperforming. Add some warnings to help guide the user. As a next step, we can try to default to a good pipeline setting based on these constraints.
_estimate_available_parallelism
ef091c382eea427783ea75531fe9d5a5f008107c
ray
util.py
9
5
https://github.com/ray-project/ray.git
1
21
0
9
38
Python
{ "docstring": "Estimates the available CPU parallelism for this Dataset in the cluster.\n If we are currently in a placement group, take that into account.", "language": "en", "n_whitespaces": 25, "n_words": 23, "vocab_size": 21 }
def _estimate_available_parallelism() -> int: cur_pg = ray.util.get_current_placement_group() return _estimate_avail_cpus(cur_pg)
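The record does not show `_estimate_avail_cpus`, so the following is only a plausible sketch of how such an estimate could be derived from Ray's public APIs; the function name, the bundle-summing logic, and the fallback default are assumptions, not Ray's actual implementation.

```python
import ray


def _estimate_avail_cpus_sketch(cur_pg) -> int:
    # Hypothetical stand-in for the unshown helper: if a placement group is
    # active, count the CPUs its bundles reserve; otherwise use the cluster total.
    if cur_pg is not None:
        return int(sum(bundle.get("CPU", 0) for bundle in cur_pg.bundle_specs))
    return int(ray.cluster_resources().get("CPU", 1))
```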
41,588
175,299
113
Lib/enum.py
57
11
def bin(num, max_bits=None): ceiling = 2 ** (num).bit_length() if num >= 0: s = bltns.bin(num + ceiling).replace('1', '0', 1) else:
bpo-40066: [Enum] update str() and format() output (GH-30582) Undo rejected PEP-663 changes: - restore `repr()` to its 3.10 status - restore `str()` to its 3.10 status New changes: - `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result - zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'` - update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type - added `_numeric_repr_` to `Flag` to control display of unnamed values - enums without doc strings have a more comprehensive doc string added - `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`
bin
acf7403f9baea3ae1119fc6b4a3298522188bf96
cpython
enum.py
16
12
https://github.com/python/cpython.git
4
118
0
44
192
Python
{ "docstring": "\n Like built-in bin(), except negative values are represented in\n twos-compliment, and the leading bit always indicates sign\n (0=positive, 1=negative).\n\n >>> bin(10)\n '0b0 1010'\n >>> bin(~10) # ~10 is -11\n '0b1 0101'\n ", "language": "en", "n_whitespaces": 58, "n_words": 31, "vocab_size": 30 }
def bin(num, max_bits=None): ceiling = 2 ** (num).bit_length() if num >= 0: s = bltns.bin(num + ceiling).replace('1', '0', 1) else: s = bltns.bin(~num ^ (ceiling - 1) + ceiling) sign = s[:3] digits = s[3:] if max_bits is not None: if len(digits) < max_bits: digits = (sign[-1] * max_bits + digits)[-max_bits:] return "%s %s" % (sign, digits)
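A small demo of the helper above, assuming the function is in scope together with enum.py's module-level `bltns` alias for `builtins`; the first two expected outputs match the docstring, and the `max_bits` call is an extra illustration.

```python
import builtins as bltns  # enum.py defines this alias at module level

print(bin(10))               # 0b0 1010
print(bin(~10))              # ~10 is -11 -> 0b1 0101
print(bin(10, max_bits=8))   # 0b0 00001010  (digits left-padded with the sign bit)
```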
76,691
261,218
31
sklearn/utils/__init__.py
15
8
def axis0_safe_slice(X, mask, len_mask): if len_mask
DOC Ensure that sklearn.utils.axis0_safe_slice passes numpydoc (#24561)
axis0_safe_slice
537c325f2927895449ce418b3a77750135c0ba7b
scikit-learn
__init__.py
11
4
https://github.com/scikit-learn/scikit-learn.git
2
45
0
14
68
Python
{ "docstring": "Return a mask which is safer to use on X than safe_mask.\n\n This mask is safer than safe_mask since it returns an\n empty array, when a sparse matrix is sliced with a boolean mask\n with all False, instead of raising an unhelpful error in older\n versions of SciPy.\n\n See: https://github.com/scipy/scipy/issues/5361\n\n Also note that we can avoid doing the dot product by checking if\n the len_mask is not zero in _huber_loss_and_gradient but this\n is not going to be the bottleneck, since the number of outliers\n and non_outliers are typically non-zero and it makes the code\n tougher to follow.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Data on which to apply mask.\n\n mask : ndarray\n Mask to be used on X.\n\n len_mask : int\n The length of the mask.\n\n Returns\n -------\n mask : ndarray\n Array that is safe to use on X.\n ", "language": "en", "n_whitespaces": 225, "n_words": 140, "vocab_size": 91 }
def axis0_safe_slice(X, mask, len_mask): if len_mask != 0: return X[safe_mask(X, mask), :] return np.zeros(shape=(0, X.shape[1]))
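An illustrative call showing the all-False-mask case the docstring warns about; the toy matrix is an assumption made for the example.

```python
import numpy as np
from scipy import sparse
from sklearn.utils import axis0_safe_slice

X = sparse.csr_matrix(np.arange(12, dtype=float).reshape(4, 3))
mask = np.zeros(4, dtype=bool)  # selects no rows at all

# With an all-False mask this returns an empty (0, 3) array instead of
# tripping the older-SciPy error mentioned in the docstring.
rows = axis0_safe_slice(X, mask, int(mask.sum()))
print(rows.shape)  # (0, 3)
```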
76,401
260,662
366
sklearn/impute/_base.py
121
20
def _most_frequent(array, extra_value, n_repeat): # Compute the most frequent value in array only if array.size > 0: if array.dtype == object: # scipy.stats.mode is slow with object dtype array. # Python Counter is more efficient counter = Counter(array) most_frequent_count = counter.most_common(1)[0][1] # tie breaking similarly to scipy.stats.mode most_frequent_value = min( value for value, count in counter.items() if count == most_frequent_count ) else: mode = _mode(array) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0
MAINT fix the way to call stats.mode (#23633) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Meekail Zain <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
_most_frequent
02a4b342181e5ff0226081691308414e53c3107b
scikit-learn
_base.py
15
25
https://github.com/scikit-learn/scikit-learn.git
10
137
0
67
221
Python
{ "docstring": "Compute the most frequent value in a 1d array extended with\n [extra_value] * n_repeat, where extra_value is assumed to be not part\n of the array.", "language": "en", "n_whitespaces": 30, "n_words": 25, "vocab_size": 24 }
def _most_frequent(array, extra_value, n_repeat): # Compute the most frequent value in array only if array.size > 0: if array.dtype == object: # scipy.stats.mode is slow with object dtype array. # Python Counter is more efficient counter = Counter(array) most_frequent_count = counter.most_common(1)[0][1] # tie breaking similarly to scipy.stats.mode most_frequent_value = min( value for value, count in counter.items() if count == most_frequent_count ) else: mode = _mode(array) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 # Compare to array + [extra_value] * n_repeat if most_frequent_count == 0 and n_repeat == 0: return np.nan elif most_frequent_count < n_repeat: return extra_value elif most_frequent_count > n_repeat: return most_frequent_value elif most_frequent_count == n_repeat: # tie breaking similarly to scipy.stats.mode return min(most_frequent_value, extra_value)
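A hedged illustration of the tie-breaking behaviour; `_most_frequent` is a private helper, so the import path below may change between scikit-learn versions.

```python
import numpy as np
from sklearn.impute._base import _most_frequent  # private API; path may vary

# Conceptually the array is extended with [extra_value] * n_repeat = [9, 9].
# Counts: 1 -> 2, 2 -> 1, 9 -> 2; the tie between 1 and 9 resolves to min -> 1.
print(_most_frequent(np.array([1, 1, 2]), extra_value=9, n_repeat=2))  # 1
```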
75,886
259,737
503
sklearn/discriminant_analysis.py
144
24
def _cov(X, shrinkage=None, covariance_estimator=None): if covariance_estimator is None: shrinkage = "empirical" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == "auto": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == "empirical": s = empirical_covariance(X) else: raise ValueError("unknown shrinkage parameter") elif isinstance(shrinkage, Real): if shrinkage < 0 or shrinkage > 1: raise ValueError("shrinkage parameter must be between 0 and 1") s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError("shrinkage must be a float or a string") else: if shrinkage is not None and shrinkage != 0: raise ValueError( "covariance_estimator and shrinkage parameters " "are not None. Only one of the two can be set." ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, "covariance_"): raise ValueError( "%s does not have a covariance_ attribute" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s
MNT Combine multiple `isinstance` call (#23204)
_cov
3f1833d5805a99894f1fc6b858a9ac663e175997
scikit-learn
discriminant_analysis.py
16
33
https://github.com/scikit-learn/scikit-learn.git
12
197
0
87
333
Python
{ "docstring": "Estimate covariance matrix (using optional covariance_estimator).\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n shrinkage : {'empirical', 'auto'} or float, default=None\n Shrinkage parameter, possible values:\n - None or 'empirical': no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator`\n is not None.\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying on the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in :mod:`sklearn.covariance``.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n s : ndarray of shape (n_features, n_features)\n Estimated covariance matrix.\n ", "language": "en", "n_whitespaces": 265, "n_words": 126, "vocab_size": 93 }
def _cov(X, shrinkage=None, covariance_estimator=None): if covariance_estimator is None: shrinkage = "empirical" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == "auto": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == "empirical": s = empirical_covariance(X) else: raise ValueError("unknown shrinkage parameter") elif isinstance(shrinkage, Real): if shrinkage < 0 or shrinkage > 1: raise ValueError("shrinkage parameter must be between 0 and 1") s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError("shrinkage must be a float or a string") else: if shrinkage is not None and shrinkage != 0: raise ValueError( "covariance_estimator and shrinkage parameters " "are not None. Only one of the two can be set." ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, "covariance_"): raise ValueError( "%s does not have a covariance_ attribute" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s
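A short usage sketch covering the three branches above; `_cov` is a private helper of `sklearn.discriminant_analysis`, so this import is an assumption about the current layout rather than a stable public API.

```python
import numpy as np
from sklearn.covariance import LedoitWolf
from sklearn.discriminant_analysis import _cov  # private helper; may change

rng = np.random.RandomState(0)
X = rng.randn(50, 3)

s_auto = _cov(X, shrinkage="auto")                   # Ledoit-Wolf on standardized X
s_fixed = _cov(X, shrinkage=0.1)                     # fixed shrinkage coefficient
s_est = _cov(X, covariance_estimator=LedoitWolf())   # delegate to an estimator object
print(s_auto.shape, s_fixed.shape, s_est.shape)      # (3, 3) (3, 3) (3, 3)
```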
4,211
22,139
50
pipenv/patched/pip/_vendor/requests/utils.py
24
9
def from_key_val_list(value): if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError("cannot encode objects tha
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
from_key_val_list
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
utils.py
10
6
https://github.com/pypa/pipenv.git
3
39
0
22
63
Python
{ "docstring": "Take an object and test to see if it can be represented as a\n dictionary. Unless it can not be represented as such, return an\n OrderedDict, e.g.,\n\n ::\n\n >>> from_key_val_list([('key', 'val')])\n OrderedDict([('key', 'val')])\n >>> from_key_val_list('string')\n Traceback (most recent call last):\n ...\n ValueError: cannot encode objects that are not 2-tuples\n >>> from_key_val_list({'key': 'val'})\n OrderedDict([('key', 'val')])\n\n :rtype: OrderedDict\n ", "language": "en", "n_whitespaces": 127, "n_words": 56, "vocab_size": 44 }
def from_key_val_list(value): if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError("cannot encode objects that are not 2-tuples") return OrderedDict(value)
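A quick usage sketch; the record shows pipenv's vendored copy, while the example imports the upstream `requests` package, which exposes the same helper.

```python
from requests.utils import from_key_val_list

print(from_key_val_list([("Accept", "application/json"), ("X-Token", "abc")]))
# OrderedDict([('Accept', 'application/json'), ('X-Token', 'abc')])

try:
    from_key_val_list("not-a-mapping")
except ValueError as exc:
    print(exc)  # cannot encode objects that are not 2-tuples
```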
3,373
20,445
237
pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py
92
14
def load_lexer_from_file(filename, lexername="CustomLexer", **options): try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} with open(filename, 'rb') as f: exec(f.read(), custom_namespace) #
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
load_lexer_from_file
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
__init__.py
13
16
https://github.com/pypa/pipenv.git
5
100
0
70
176
Python
{ "docstring": "Load a lexer from a file.\n\n This method expects a file located relative to the current working\n directory, which contains a Lexer class. By default, it expects the\n Lexer to be name CustomLexer; you can specify your own class name\n as the second argument to this function.\n\n Users should be very careful with the input, because this method\n is equivalent to running eval on the input file.\n\n Raises ClassNotFound if there are any problems importing the Lexer.\n\n .. versionadded:: 2.2\n ", "language": "en", "n_whitespaces": 107, "n_words": 80, "vocab_size": 62 }
def load_lexer_from_file(filename, lexername="CustomLexer", **options): try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} with open(filename, 'rb') as f: exec(f.read(), custom_namespace) # Retrieve the class `lexername` from that namespace if lexername not in custom_namespace: raise ClassNotFound('no valid %s class found in %s' % (lexername, filename)) lexer_class = custom_namespace[lexername] # And finally instantiate it with the options return lexer_class(**options) except OSError as err: raise ClassNotFound('cannot read %s: %s' % (filename, err)) except ClassNotFound: raise except Exception as err: raise ClassNotFound('error when loading custom lexer: %s' % err)
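A hedged end-to-end example: it writes a throwaway lexer module to a temporary file and loads it with the function above. The class name follows the `CustomLexer` default; the token rules and file name are purely illustrative, and the import uses the upstream `pygments` package rather than the vendored copy shown in the record.

```python
import pathlib
import tempfile

from pygments.lexers import load_lexer_from_file

lexer_src = '''
from pygments.lexer import RegexLexer
from pygments.token import Keyword, Text

class CustomLexer(RegexLexer):
    name = "TinyDemo"
    tokens = {
        "root": [
            (r"[a-z]+", Keyword),
            (r" +", Text),
        ],
    }
'''

with tempfile.TemporaryDirectory() as tmp:
    path = pathlib.Path(tmp) / "tiny_lexer.py"
    path.write_text(lexer_src)
    lexer = load_lexer_from_file(str(path))  # defaults to class name CustomLexer
    print(list(lexer.get_tokens("hello world"))[:3])
```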
13,580
64,228
158
erpnext/utilities/product.py
214
45
def get_price(item_code, price_list, customer_group, company, qty=1): from erpnext.e_commerce.shopping_cart.cart import get_party template_item_code = frappe.db.get_value("Item", item_code, "variant_of") if price_list: price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"], filters={"price_list": price_list, "item_code": item_code}) if template_item_code and not price: price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"], filters={"price_list": price_list, "item_code": template_item_code}) if price: party = get_party() pricing_rule_dict = frappe._dict({ "item_code": item_code, "qty": qty, "stock_qty": qty, "transaction_type": "selling", "price_list": price_list, "customer_group": customer_group, "company": company, "conversion_rate": 1, "for_shopping_cart": True, "currency": frappe.db.get_value("Price List", price_list, "currency") }) if party and party.doctype == "Customer": pricing_rule_dict.update({"customer": party.name}) pricing_rule = get_pricing_rule_for_item(pricing_rule_dict) price_obj = price[0] if pricing_rule: # price without any rules applied mrp = price_obj.price_list_rate or 0 if pricing_rule.pricing_rule_for == "Discount Percentage": price_obj.discount_percent = pricing_rule.discount_percentage price_obj.formatted_discount_percent = str(flt(pricing_rule.discount_percentage, 0)) + "%" price_obj.price_list_rate = flt(price_obj.price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0))) if pricing_rule.pricing_rule_for == "Rate": rate_discount = flt(mrp) - flt(pricing_rule.price_list_rate) if rate_discount > 0: price_obj.formatted_discount_rate = fmt_money(rate_discount, currency=price_obj["currency"]) price_obj.price_list_rate = pricing_rule.price_list_rate or 0 if price_obj: price_obj["formatted_price"] = fmt_money(price_obj["price_list_rate"], currency=price_obj["currency"]) if mrp != price_obj["price_list_rate"]: price_obj["formatted_mrp"] = fmt_money(mrp, currency=price_obj["currency"])
fix: fetch correct selling price.
get_price
282fbf4b07740e14566f16d749b549239d7253a7
erpnext
product.py
24
58
https://github.com/frappe/erpnext.git
22
509
0
128
854
Python
{ "docstring": "select\tC.conversion_factor\n\t\t\t\t\tfrom `tabUOM Conversion Detail` C\n\t\t\t\t\tinner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom\n\t\t\t\t\twhere I.name = %s", "language": "en", "n_whitespaces": 18, "n_words": 23, "vocab_size": 20 }
def get_price(item_code, price_list, customer_group, company, qty=1): from erpnext.e_commerce.shopping_cart.cart import get_party template_item_code = frappe.db.get_value("Item", item_code, "variant_of") if price_list: price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"], filters={"price_list": price_list, "item_code": item_code}) if template_item_code and not price: price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"], filters={"price_list": price_list, "item_code": template_item_code}) if price: party = get_party() pricing_rule_dict = frappe._dict({ "item_code": item_code, "qty": qty, "stock_qty": qty, "transaction_type": "selling", "price_list": price_list, "customer_group": customer_group, "company": company, "conversion_rate": 1, "for_shopping_cart": True, "currency": frappe.db.get_value("Price List", price_list, "currency") }) if party and party.doctype == "Customer": pricing_rule_dict.update({"customer": party.name}) pricing_rule = get_pricing_rule_for_item(pricing_rule_dict) price_obj = price[0] if pricing_rule: # price without any rules applied mrp = price_obj.price_list_rate or 0 if pricing_rule.pricing_rule_for == "Discount Percentage": price_obj.discount_percent = pricing_rule.discount_percentage price_obj.formatted_discount_percent = str(flt(pricing_rule.discount_percentage, 0)) + "%" price_obj.price_list_rate = flt(price_obj.price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0))) if pricing_rule.pricing_rule_for == "Rate": rate_discount = flt(mrp) - flt(pricing_rule.price_list_rate) if rate_discount > 0: price_obj.formatted_discount_rate = fmt_money(rate_discount, currency=price_obj["currency"]) price_obj.price_list_rate = pricing_rule.price_list_rate or 0 if price_obj: price_obj["formatted_price"] = fmt_money(price_obj["price_list_rate"], currency=price_obj["currency"]) if mrp != price_obj["price_list_rate"]: price_obj["formatted_mrp"] = fmt_money(mrp, currency=price_obj["currency"]) price_obj["currency_symbol"] = not cint(frappe.db.get_default("hide_currency_symbol")) \ and (frappe.db.get_value("Currency", price_obj.currency, "symbol", cache=True) or price_obj.currency) \ or "" uom_conversion_factor = frappe.db.sql("""select C.conversion_factor from `tabUOM Conversion Detail` C inner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom where I.name = %s""", item_code) uom_conversion_factor = uom_conversion_factor[0][0] if uom_conversion_factor else 1 price_obj["formatted_price_sales_uom"] = fmt_money(price_obj["price_list_rate"] * uom_conversion_factor, currency=price_obj["currency"]) if not price_obj["price_list_rate"]: price_obj["price_list_rate"] = 0 if not price_obj["currency"]: price_obj["currency"] = "" if not price_obj["formatted_price"]: price_obj["formatted_price"], price_obj["formatted_mrp"] = "", "" return price_obj
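A heavily hedged call sketch: `get_price` needs a configured Frappe/ERPNext bench with a live site and master data, so none of the names below come from the record; the site, item, company, and price list values are placeholders only.

```python
import frappe
from erpnext.utilities.product import get_price  # module path from the record

# Placeholder site; this assumes an existing bench with this site configured.
frappe.init(site="shop.localhost")
frappe.connect()

price = get_price(
    item_code="WIDGET-001",             # hypothetical item
    price_list="Standard Selling",      # hypothetical selling price list
    customer_group="All Customer Groups",
    company="Example Co",               # hypothetical company
    qty=5,
)
print(price and price.get("formatted_price"))
```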