ast_errors
stringlengths 0
3.2k
| d_id
int64 44
121k
| id
int64 70
338k
| n_whitespaces
int64 3
14k
| path
stringlengths 8
134
| n_words
int64 4
4.82k
| n_identifiers
int64 1
131
| random_cut
stringlengths 16
15.8k
| commit_message
stringlengths 2
15.3k
| fun_name
stringlengths 1
84
| commit_id
stringlengths 40
40
| repo
stringlengths 3
28
| file_name
stringlengths 5
79
| ast_levels
int64 6
31
| nloc
int64 1
548
| url
stringlengths 31
59
| complexity
int64 1
66
| token_counts
int64 6
2.13k
| n_ast_errors
int64 0
28
| vocab_size
int64 4
1.11k
| n_ast_nodes
int64 15
19.2k
| language
stringclasses 1
value | documentation
dict | code
stringlengths 101
62.2k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
@pytest.mark.parametrize("criterion", ("poisson", "squared_error")) | 75,307 | 258,587 | 483 | sklearn/ensemble/tests/test_forest.py | 247 | 50 | def test_poisson_vs_mse():
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 500, 10
X = datasets.make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
# We create a log-linear Poisson model and downscale coef as it will get
# exponentiated.
coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
y = rng.poisson(lam=np.exp(X @ coef))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=rng
)
# We prevent some overfitting by setting min_samples_split=10.
forest_poi = RandomForestRegressor(
criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
)
forest_mse = RandomForestRegressor(
criterion="squared_error",
min_samples_leaf=10,
max_features="sqrt",
random_state=rng,
)
forest_poi.fit(X_train, y_train)
forest_mse.fit(X_train, y_train)
dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
| FIX poisson proxy_impurity_improvement (#22191) | test_poisson_vs_mse | 2b15b908c11b90a15253394b1a03bd535720d6ce | scikit-learn | test_forest.py | 14 | 32 | https://github.com/scikit-learn/scikit-learn.git | 3 | 279 | 1 | 163 | 458 | Python | {
"docstring": "Test that random forest with poisson criterion performs better than\n mse for a poisson target.\n\n There is a similar test for DecisionTreeRegressor.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 22,
"vocab_size": 19
} | def test_poisson_vs_mse():
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 500, 10
X = datasets.make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
# We create a log-linear Poisson model and downscale coef as it will get
# exponentiated.
coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
y = rng.poisson(lam=np.exp(X @ coef))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=rng
)
# We prevent some overfitting by setting min_samples_split=10.
forest_poi = RandomForestRegressor(
criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
)
forest_mse = RandomForestRegressor(
criterion="squared_error",
min_samples_leaf=10,
max_features="sqrt",
random_state=rng,
)
forest_poi.fit(X_train, y_train)
forest_mse.fit(X_train, y_train)
dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
for X, y, val in [(X_train, y_train, "train"), (X_test, y_test, "test")]:
metric_poi = mean_poisson_deviance(y, forest_poi.predict(X))
# squared_error forest might produce non-positive predictions => clip
# If y = 0 for those, the poisson deviance gets too good.
# If we drew more samples, we would eventually get y > 0 and the
# poisson deviance would explode, i.e. be undefined. Therefore, we do
# not clip to a tiny value like 1e-15, but to 1e-6. This acts like a
# small penalty to the non-positive predictions.
metric_mse = mean_poisson_deviance(
y, np.clip(forest_mse.predict(X), 1e-6, None)
)
metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
# As squared_error might correctly predict 0 in train set, its train
# score can be better than Poisson. This is no longer the case for the
# test set. But keep the above comment for clipping in mind.
if val == "test":
assert metric_poi < metric_mse
assert metric_poi < 0.5 * metric_dummy
@pytest.mark.parametrize("criterion", ("poisson", "squared_error")) |
29,265 | 130,419 | 77 | python/ray/autoscaler/_private/cli_logger.py | 28 | 14 | def _external_caller_info():
frame = inspect.currentframe()
caller = frame
levels = 0
while caller.f_code.co_filename == __file__:
caller = caller.f_back
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _external_caller_info | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | cli_logger.py | 11 | 11 | https://github.com/ray-project/ray.git | 2 | 59 | 0 | 22 | 100 | Python | {
"docstring": "Get the info from the caller frame.\n\n Used to override the logging function and line number with the correct\n ones. See the comment on _patched_makeRecord for more info.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 28,
"vocab_size": 24
} | def _external_caller_info():
frame = inspect.currentframe()
caller = frame
levels = 0
while caller.f_code.co_filename == __file__:
caller = caller.f_back
levels += 1
return {
"lineno": caller.f_lineno,
"filename": os.path.basename(caller.f_code.co_filename),
}
|
|
45,568 | 186,660 | 84 | certbot-apache/certbot_apache/_internal/override_centos.py | 27 | 11 | def _try_restart_fedora(self) -> None:
try:
util.run_script(['systemctl', 'restart', 'httpd'])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
# Finish with actual config check to see if systemctl restart helped
super().config_test()
| Add typing to certbot.apache (#9071)
* Add typing to certbot.apache
Co-authored-by: Adrien Ferrand <[email protected]> | _try_restart_fedora | 7d9e9a49005de7961e84d2a7c608db57dbab3046 | certbot | override_centos.py | 12 | 9 | https://github.com/certbot/certbot.git | 2 | 46 | 0 | 27 | 85 | Python | {
"docstring": "\n Tries to restart httpd using systemctl to generate the self signed key pair.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def _try_restart_fedora(self) -> None:
try:
util.run_script(['systemctl', 'restart', 'httpd'])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
# Finish with actual config check to see if systemctl restart helped
super().config_test()
|
|
56,078 | 220,661 | 78 | python3.10.4/Lib/asyncio/selector_events.py | 25 | 11 | async def sock_accept(self, sock):
base_events._check_ssl_s | add python 3.10.4 for windows | sock_accept | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | selector_events.py | 10 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 50 | 0 | 24 | 86 | Python | {
"docstring": "Accept a connection.\n\n The socket must be bound to an address and listening for connections.\n The return value is a pair (conn, address) where conn is a new socket\n object usable to send and receive data on the connection, and address\n is the address bound to the socket on the other end of the connection.\n ",
"language": "en",
"n_whitespaces": 90,
"n_words": 55,
"vocab_size": 35
} | async def sock_accept(self, sock):
base_events._check_ssl_socket(sock)
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
self._sock_accept(fut, sock)
return await fut
|
|
572 | 3,825 | 133 | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py | 37 | 12 | def test_state(self, api, state):
stream = AdsInsights(
api=api,
start_ | 🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactoring of async jobs, support random order of slices
* update source _read_incremental to hook new state logic
* fix issues with timeout
* remove debugging and clean up, improve retry logic
* merge changes from #8234
* fix call super _read_increment
* generalize batch execution, add use_batch flag
* improve coverage, do some refactoring of spec
* update test, remove overrides of source
* add split by AdSet
* add smaller insights
* fix end_date < start_date case
* add account_id to PK
* add notes
* fix new streams
* fix reversed incremental stream
* update spec.json for SAT
* upgrade CDK and bump version
Co-authored-by: Dmytro Rezchykov <[email protected]>
Co-authored-by: Eugene Kulak <[email protected]> | test_state | a3aae8017a0a40ff2006e2567f71dccb04c997a5 | airbyte | test_base_insight_streams.py | 11 | 12 | https://github.com/airbytehq/airbyte.git | 1 | 96 | 0 | 24 | 152 | Python | {
"docstring": "State setter/getter should work with all combinations",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def test_state(self, api, state):
stream = AdsInsights(
api=api,
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
)
assert stream.state == {}
stream.state = state
actual_state = stream.state
actual_state["slices"] = sorted(actual_state.get("slices", []))
state["slices"] = sorted(state.get("slices", []))
assert actual_state == state
|
|
19,288 | 96,187 | 25 | src/sentry/search/events/builder.py | 11 | 3 | def get_snql_query(self) -> None:
raise NotImplementedError("get_snql_ | feat(MEP): Add initial framework for metric queries (#31649)
- This adds a MetricsQueryBuilder, which works very similarily to our
QueryBuilder, but with specific handlers for how metrics construct
queries
- This MetricsQueryBuilder does not yet construct snql queries, and will
not because table queries will require multiple queries to construct
similar table data
- that is, if we want [transaction, p95, count_unique(user)], we need
a query against distributions with [transaction, p95] followed by a
second query for [transaction, count_unique(user)] against the sets
table
- This is so we can maintain a sortby | get_snql_query | cf30c11a194aa5e61d8d7c7fc506764f846fcf82 | sentry | builder.py | 8 | 4 | https://github.com/getsentry/sentry.git | 1 | 13 | 0 | 11 | 26 | Python | {
"docstring": "Because metrics table queries need to make multiple requests per metric type this function cannot be\n inmplemented see run_query",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 19
} | def get_snql_query(self) -> None:
raise NotImplementedError("get_snql_query cannot be implemented for MetricsQueryBuilder")
|
|
30,004 | 133,391 | 33 | python/ray/util/sgd/torch/worker_group.py | 12 | 8 | def _validate(self, params):
remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers]
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _validate | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | worker_group.py | 10 | 3 | https://github.com/ray-project/ray.git | 2 | 29 | 0 | 11 | 47 | Python | {
"docstring": "Runs validation for each worker. Returns results as promises.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _validate(self, params):
remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers]
return remote_worker_stats
|
|
23,404 | 108,967 | 508 | lib/mpl_toolkits/mplot3d/axes3d.py | 94 | 31 | def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
_api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
aspect=aspect)
super().set_aspect(
aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
if aspect == 'equal':
axis_indices = [0, 1, 2]
elif aspect == 'equalxy':
axis_indices = [0, 1]
elif aspect == 'equalxz':
axis_indices = [0, 2]
elif aspect == 'equalyz':
axis_indices = [1, 2]
view_intervals = np.array([self.xaxis.get_view_interval(),
self.yaxis.get_view_interval(),
self.zaxis.get_view_interval()])
mean = np.mean(view_intervals, axis=1)
delta = np.max(np.ptp(view_intervals, axis=1))
deltas = delta * self._box_aspect / min(self._box_aspect)
for i, set_lim in enumerate((self.set_xlim | Add equalxy, equalyz, equalxz aspect ratios
Update docstrings | set_aspect | 31d13198ecf6969b1b693c28a02b0805f3f20420 | matplotlib | axes3d.py | 16 | 25 | https://github.com/matplotlib/matplotlib.git | 8 | 255 | 0 | 65 | 399 | Python | {
"docstring": "\n Set the aspect ratios.\n\n Parameters\n ----------\n aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'}\n Possible values:\n\n ========= ==================================================\n value description\n ========= ==================================================\n 'auto' automatic; fill the position rectangle with data.\n 'equal' adapt all the axes to have equal aspect ratios.\n 'equalxy' adapt the x and y axes to have equal aspect ratios.\n 'equalxz' adapt the x and z axes to have equal aspect ratios.\n 'equalyz' adapt the y and z axes to have equal aspect ratios.\n ========= ==================================================\n\n adjustable : None\n Currently ignored by Axes3D\n\n If not *None*, this defines which parameter will be adjusted to\n meet the required aspect. See `.set_adjustable` for further\n details.\n\n anchor : None or str or 2-tuple of float, optional\n If not *None*, this defines where the Axes will be drawn if there\n is extra space due to aspect constraints. The most common way to\n to specify the anchor are abbreviations of cardinal directions:\n\n ===== =====================\n value description\n ===== =====================\n 'C' centered\n 'SW' lower left corner\n 'S' middle of bottom edge\n 'SE' lower right corner\n etc.\n ===== =====================\n\n See `~.Axes.set_anchor` for further details.\n\n share : bool, default: False\n If ``True``, apply the settings to all shared Axes.\n\n See Also\n --------\n mpl_toolkits.mplot3d.axes3d.Axes3D.set_box_aspect\n ",
"language": "en",
"n_whitespaces": 630,
"n_words": 195,
"vocab_size": 117
} | def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
_api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
aspect=aspect)
super().set_aspect(
aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
if aspect == 'equal':
axis_indices = [0, 1, 2]
elif aspect == 'equalxy':
axis_indices = [0, 1]
elif aspect == 'equalxz':
axis_indices = [0, 2]
elif aspect == 'equalyz':
axis_indices = [1, 2]
view_intervals = np.array([self.xaxis.get_view_interval(),
self.yaxis.get_view_interval(),
self.zaxis.get_view_interval()])
mean = np.mean(view_intervals, axis=1)
delta = np.max(np.ptp(view_intervals, axis=1))
deltas = delta * self._box_aspect / min(self._box_aspect)
for i, set_lim in enumerate((self.set_xlim3d,
self.set_ylim3d,
self.set_zlim3d)):
if i in axis_indices:
set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.)
|
|
54,713 | 217,315 | 77 | python3.10.4/Lib/enum.py | 16 | 7 | def __getattr__(cls, name):
if _is_dunder(name):
raise AttributeError(name)
try:
return cl | add python 3.10.4 for windows | __getattr__ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | enum.py | 10 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 38 | 0 | 14 | 62 | Python | {
"docstring": "\n Return the enum member matching `name`\n\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members themselves.\n ",
"language": "en",
"n_whitespaces": 85,
"n_words": 42,
"vocab_size": 32
} | def __getattr__(cls, name):
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name) from None
|
|
75,904 | 259,759 | 34 | sklearn/cluster/tests/test_bisect_k_means.py | 16 | 16 | def test_n_clusters(n_clusters):
rng = np.random.RandomState(0)
X | FEA Bisecting K-Means (#20031)
Co-authored-by: Gael Varoquaux <[email protected]>
Co-authored-by: Tom Dupré la Tour <[email protected]>
Co-authored-by: Julien Jerphanion <[email protected]>
Co-authored-by: Jérémie du Boisberranger <[email protected]> | test_n_clusters | 0822851f5cb17827939a7d7b4f8c84f43184ae89 | scikit-learn | test_bisect_k_means.py | 10 | 6 | https://github.com/scikit-learn/scikit-learn.git | 1 | 62 | 0 | 14 | 100 | Python | {
"docstring": "Test if resulting labels are in range [0, n_clusters - 1].",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_n_clusters(n_clusters):
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0)
bisect_means.fit(X)
assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters))
|
|
57,228 | 224,175 | 196 | mkdocs/tests/structure/nav_tests.py | 46 | 24 | def test_nested_ungrouped_nav(self):
nav_cfg = [
{'Home': 'index.md'},
{'Contact': 'about/contact.md'},
{'License Title': 'about/sub/license.md'},
]
expected = dedent(
)
cfg = load_config(nav=nav_cfg, site_url='http://example.com/')
fs = [
File(list(item.values())[0], cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
for item in nav_cfg
] | Some manual changes ahead of formatting code with Black | test_nested_ungrouped_nav | 372384d8102ddb4be6360f44d1bfddb8b45435a4 | mkdocs | nav_tests.py | 14 | 23 | https://github.com/mkdocs/mkdocs.git | 2 | 137 | 0 | 37 | 228 | Python | {
"docstring": "\n Page(title='Home', url='/')\n Page(title='Contact', url='/about/contact/')\n Page(title='License Title', url='/about/sub/license/')\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 7,
"vocab_size": 7
} | def test_nested_ungrouped_nav(self):
nav_cfg = [
{'Home': 'index.md'},
{'Contact': 'about/contact.md'},
{'License Title': 'about/sub/license.md'},
]
expected = dedent(
)
cfg = load_config(nav=nav_cfg, site_url='http://example.com/')
fs = [
File(list(item.values())[0], cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
for item in nav_cfg
]
files = Files(fs)
site_navigation = get_navigation(files, cfg)
self.assertEqual(str(site_navigation).strip(), expected)
self.assertEqual(len(site_navigation.items), 3)
self.assertEqual(len(site_navigation.pages), 3)
|
|
29,378 | 130,806 | 156 | python/ray/node.py | 32 | 14 | def _get_log_file_names(self, name, unique=False):
if unique:
log_stdout = self._make_inc_temp(
suffix=".out", prefix=name, directory_name=self._logs_dir
)
log_stderr = self._make_inc_temp(
suffix=".err", prefix=name, directory_name=self._logs_dir
)
else:
log_stdout = os.path.join(self._logs_dir, f"{name}.out")
log_stderr = os.path.join(self._logs_dir, f"{name}.err")
return log_stdout, log_stderr
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _get_log_file_names | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | node.py | 13 | 12 | https://github.com/ray-project/ray.git | 2 | 91 | 0 | 21 | 151 | Python | {
"docstring": "Generate partially randomized filenames for log files.\n\n Args:\n name (str): descriptive string for this log file.\n unique (bool): if true, a counter will be attached to `name` to\n ensure the returned filename is not already used.\n\n Returns:\n A tuple of two file names for redirecting (stdout, stderr).\n ",
"language": "en",
"n_whitespaces": 116,
"n_words": 47,
"vocab_size": 43
} | def _get_log_file_names(self, name, unique=False):
if unique:
log_stdout = self._make_inc_temp(
suffix=".out", prefix=name, directory_name=self._logs_dir
)
log_stderr = self._make_inc_temp(
suffix=".err", prefix=name, directory_name=self._logs_dir
)
else:
log_stdout = os.path.join(self._logs_dir, f"{name}.out")
log_stderr = os.path.join(self._logs_dir, f"{name}.err")
return log_stdout, log_stderr
|
|
56,302 | 221,263 | 74 | python3.10.4/Lib/calendar.py | 24 | 10 | def yeardayscalendar(self, year, width=3):
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
| add python 3.10.4 for windows | yeardayscalendar | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | calendar.py | 11 | 6 | https://github.com/XX-net/XX-Net.git | 3 | 60 | 0 | 20 | 88 | Python | {
"docstring": "\n Return the data for the specified year ready for formatting (similar to\n yeardatescalendar()). Entries in the week lists are day numbers.\n Day numbers outside this month are zero.\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 28,
"vocab_size": 24
} | def yeardayscalendar(self, year, width=3):
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
|
|
51,877 | 207,141 | 81 | tests/admin_filters/tests.py | 28 | 15 | def test_simplelistfilter_without_parameter(self):
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
modeladmin.get_changelist_instance(request)
| Refs #33476 -- Reformatted code with Black. | test_simplelistfilter_without_parameter | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 9 | 7 | https://github.com/django/django.git | 1 | 53 | 0 | 25 | 92 | Python | {
"docstring": "\n Any SimpleListFilter must define a parameter_name.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def test_simplelistfilter_without_parameter(self):
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
modeladmin.get_changelist_instance(request)
|
|
18,375 | 88,327 | 226 | src/sentry/api/invite_helper.py | 54 | 19 | def from_session_or_email(cls, request, organization, email, instance=None, logger=None):
invite_token, invite_member_id = get_invite_details(request)
try:
if invite_token and invite_member_id:
om = OrganizationMember.objects.get(token=invite_token, id=invite_member_id)
else:
om = OrganizationMember.objects.get(
email=email, organization=organization, user=None
)
except OrganizationMember.DoesNotExist:
# Unable to locate the pending organization member. Cannot setup
# the invite helper.
return None
re | Move invite code functionality from cookie to session (#40905)
Moves the invite functionality from cookies to the session. This is to
harden the security of the platform.
With the cookie approach, a client can manipulate the cookie value for
`pending-invite` resulting in situations where an invite code can be
reused. | from_session_or_email | 565f971da955d57c754a47f5802fe9f9f7c66b39 | sentry | invite_helper.py | 14 | 14 | https://github.com/getsentry/sentry.git | 4 | 107 | 0 | 47 | 161 | Python | {
"docstring": "\n Initializes the ApiInviteHelper by locating the pending organization\n member via the currently set pending invite details in the session, or\n via the passed email if no cookie is currently set.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 30,
"vocab_size": 23
} | def from_session_or_email(cls, request, organization, email, instance=None, logger=None):
invite_token, invite_member_id = get_invite_details(request)
try:
if invite_token and invite_member_id:
om = OrganizationMember.objects.get(token=invite_token, id=invite_member_id)
else:
om = OrganizationMember.objects.get(
email=email, organization=organization, user=None
)
except OrganizationMember.DoesNotExist:
# Unable to locate the pending organization member. Cannot setup
# the invite helper.
return None
return cls(
request=request, member_id=om.id, token=om.token, instance=instance, logger=logger
)
|
|
89,278 | 290,159 | 48 | tests/components/bluetooth/test_usage.py | 20 | 10 | async def test_multiple_bleak_scanner_instances(hass):
install_multiple_bleak_catcher()
instance = bleak.BleakScanner()
assert isinstance(instance, HaBleakScannerWrapper)
uninstall_multiple_bleak_catcher()
with patch("bleak.get_platform_scanner_backend_type"):
instance = bleak.BleakScanner()
assert not isinstance(instance, HaBleakScannerWrapper)
| Ensure we do not actually create a BleakScanner in the usage test (#81362)
Avoids a failure when bluetooth is turned off when
testing on macos:
bleak.exc.BleakError: Bluetooth device is turned off | test_multiple_bleak_scanner_instances | ab14e55c052433e42224199798b026637614685f | core | test_usage.py | 10 | 8 | https://github.com/home-assistant/core.git | 1 | 47 | 0 | 14 | 86 | Python | {
"docstring": "Test creating multiple BleakScanners without an integration.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | async def test_multiple_bleak_scanner_instances(hass):
install_multiple_bleak_catcher()
instance = bleak.BleakScanner()
assert isinstance(instance, HaBleakScannerWrapper)
uninstall_multiple_bleak_catcher()
with patch("bleak.get_platform_scanner_backend_type"):
instance = bleak.BleakScanner()
assert not isinstance(instance, HaBleakScannerWrapper)
|
|
78,244 | 265,912 | 118 | netbox/utilities/utils.py | 59 | 17 | def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'):
# Split value on highlight string
try:
pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE)
except ValueError:
# Match not found
return escape(value)
# Trim pre/post sections to length
if trim_pre and len(pre) > trim_pre:
pre = trim_placeholder + pre[-trim_pre:]
if trim_post and len(post) > trim_post:
post = post[:trim_post] + trim_placeholder
return f'{escape(pre)}<mark>{e | Closes #10560: New global search (#10676)
* Initial work on new search backend
* Clean up search backends
* Return only the most relevant result per object
* Clear any pre-existing cached entries on cache()
* #6003: Implement global search functionality for custom field values
* Tweak field weights & document guidance
* Extend search() to accept a lookup type
* Move get_registry() out of SearchBackend
* Enforce object permissions when returning search results
* Add indexers for remaining models
* Avoid calling remove() on non-cacheable objects
* Use new search backend by default
* Extend search backend to filter by object type
* Clean up search view form
* Enable specifying lookup logic
* Add indexes for value field
* Remove object type selector from search bar
* Introduce SearchTable and enable HTMX for results
* Enable pagination
* Remove legacy search backend
* Cleanup
* Use a UUID for CachedValue primary key
* Refactoring search methods
* Define max search results limit
* Extend reindex command to support specifying particular models
* Add clear() and size to SearchBackend
* Optimize bulk caching performance
* Highlight matched portion of field value
* Performance improvements for reindexing
* Started on search tests
* Cleanup & docs
* Documentation updates
* Clean up SearchIndex
* Flatten search registry to register by app_label.model_name
* Clean up search backend classes
* Clean up RestrictedGenericForeignKey and RestrictedPrefetch
* Resolve migrations conflict | highlight_string | 9628dead07ccef9608b32906aa8194bc948e5a09 | netbox | utils.py | 12 | 10 | https://github.com/netbox-community/netbox.git | 6 | 97 | 0 | 48 | 185 | Python | {
"docstring": "\n Highlight a string within a string and optionally trim the pre/post portions of the original string.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 16,
"vocab_size": 13
} | def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'):
# Split value on highlight string
try:
pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE)
except ValueError:
# Match not found
return escape(value)
# Trim pre/post sections to length
if trim_pre and len(pre) > trim_pre:
pre = trim_placeholder + pre[-trim_pre:]
if trim_post and len(post) > trim_post:
post = post[:trim_post] + trim_placeholder
return f'{escape(pre)}<mark>{escape(match)}</mark>{escape(post)}'
|
|
27,841 | 125,350 | 392 | python/ray/_private/state.py | 63 | 33 | def node_table(self):
self._check_connected()
node_table = self.global_state_accessor.get_node_table()
results = []
for node_info_item in node_table:
item = gcs_utils.GcsNodeInfo.FromString(node_info_item)
node_info = {
"NodeID": ray._private.utils.binary_to_hex(item.node_id),
"Alive": item.state
== gcs_utils.GcsNodeInfo.GcsNodeState.Value("ALIVE"),
"NodeManagerAddress": item.node_manager_address,
"NodeManagerHostname": item.node_manager_hostname,
"NodeManagerPort": item.node_manager_port,
"ObjectManagerPort": item.object_manager_port,
"ObjectStoreSocketName": item.object_store_socket_name,
"RayletSocketName": item.raylet_socket_name,
"MetricsExportPort": item.metrics_export_port,
"NodeName": item.node_name,
}
node_info["alive"] = node_info["Alive"]
node_info["Resources"] = (
{key: value for key, value in item.resources_total.items()}
if node_info["Alive"]
else {}
)
results.append(node_info)
return results
| [Python]More efficient node_table() in state.py (#26760)
This picks up https://github.com/ray-project/ray/pull/24088
The `get_node_table` already has resources of nodes, so we don't need to invoke `get_node_resource_info` for every node again. This change will reduce lots of rpc calls and make the api more efficient. | node_table | 62288724b2b4add7ad9b12ff5299559caaa5fb55 | ray | state.py | 15 | 27 | https://github.com/ray-project/ray.git | 4 | 172 | 0 | 53 | 288 | Python | {
"docstring": "Fetch and parse the Gcs node info table.\n\n Returns:\n Information about the node in the cluster.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 16,
"vocab_size": 13
} | def node_table(self):
self._check_connected()
node_table = self.global_state_accessor.get_node_table()
results = []
for node_info_item in node_table:
item = gcs_utils.GcsNodeInfo.FromString(node_info_item)
node_info = {
"NodeID": ray._private.utils.binary_to_hex(item.node_id),
"Alive": item.state
== gcs_utils.GcsNodeInfo.GcsNodeState.Value("ALIVE"),
"NodeManagerAddress": item.node_manager_address,
"NodeManagerHostname": item.node_manager_hostname,
"NodeManagerPort": item.node_manager_port,
"ObjectManagerPort": item.object_manager_port,
"ObjectStoreSocketName": item.object_store_socket_name,
"RayletSocketName": item.raylet_socket_name,
"MetricsExportPort": item.metrics_export_port,
"NodeName": item.node_name,
}
node_info["alive"] = node_info["Alive"]
node_info["Resources"] = (
{key: value for key, value in item.resources_total.items()}
if node_info["Alive"]
else {}
)
results.append(node_info)
return results
|
|
81,097 | 273,174 | 99 | keras/layers/preprocessing/index_lookup.py | 15 | 9 | def vocabulary_size(self):
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.looku | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | vocabulary_size | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | index_lookup.py | 16 | 8 | https://github.com/keras-team/keras.git | 2 | 52 | 0 | 12 | 90 | Python | {
"docstring": "Gets the current size of the layer's vocabulary.\n\n Returns:\n The integer size of the vocabulary, including optional mask and oov indices.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 21,
"vocab_size": 17
} | def vocabulary_size(self):
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.lookup_table.size() + self._token_start_index()
|
|
53,812 | 215,095 | 114 | tests/pytests/unit/modules/test_aixpkg.py | 38 | 17 | def test_version_with_invalid_names():
lslpp_mydog_out =
ver_chk = MagicMock(return_value={"retcode": 1, "stdout": lslpp_mydog_out})
with patch.dict(aixpkg.__grains | Working tests for install | test_version_with_invalid_names | f1c37893caf90738288e789c3233ab934630254f | salt | test_aixpkg.py | 12 | 31 | https://github.com/saltstack/salt.git | 1 | 92 | 0 | 33 | 161 | Python | {
"docstring": "\n test version of packages\n lslpp: Fileset mydog not installed.\n\n\nState codes: \n A -- Applied. \n B -- Broken. \n C -- Committed. \n E -- EFIX Locked. \n O -- Obsolete. (partially migrated to newer version) \n ? -- Inconsistent State...Run lppchk -v. \n\nType codes: \n F -- Installp Fileset \n P -- Product \n C -- Component \n T -- Feature \n R -- RPM Package \n E -- Interim Fix \n",
"language": "en",
"n_whitespaces": 80,
"n_words": 61,
"vocab_size": 46
} | def test_version_with_invalid_names():
lslpp_mydog_out =
ver_chk = MagicMock(return_value={"retcode": 1, "stdout": lslpp_mydog_out})
with patch.dict(aixpkg.__grains__, {"osarch": "PowerPC_POWER8"}), patch.dict(
aixpkg.__salt__,
{"cmd.run_all": ver_chk},
):
versions_checked = aixpkg.version(
"mydog", versions_as_list=True, use_context=False
)
assert ver_chk.call_count == 1
ver_chk.assert_called_with("lslpp -Lq mydog", python_shell=False)
assert versions_checked == ""
|
|
7,878 | 43,222 | 13 | tests/models/test_dagrun.py | 7 | 4 | def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):
with dag_make | Fix mapped task immutability after clear (#23667)
We should be able to detect if the structure of mapped task has changed
and verify the integrity.
This PR ensures this
Co-authored-by: Tzu-ping Chung <[email protected]> | test_mapped_literal_length_increase_adds_additional_ti | b692517ce3aafb276e9d23570e9734c30a5f3d1f | airflow | test_dagrun.py | 11 | 29 | https://github.com/apache/airflow.git | 3 | 233 | 0 | 7 | 34 | Python | {
"docstring": "Test that when the length of mapped literal increases, additional ti is added",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):
with dag_maker(session=session) as dag:
|
|
19,263 | 96,012 | 87 | tests/sentry/integrations/bitbucket/test_installed.py | 31 | 20 | def test_installed_without_username(self):
# Remove username to simulate privacy mode
del self.user_data_from_bitbucket["principal"]["username"]
response = self.client.post(self.path, data=self.user_data_from_bitbucket)
assert response.status_code == 200
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
assert integration.name == self.user_display_name
assert integration.metadata == self | fix(bitbucket): Fix domain name (#31536)
* fix(bitbucket): Fix domain name | test_installed_without_username | 2790a30b7f6a6cffa2cd1aa69c678327a41a0664 | sentry | test_installed.py | 10 | 7 | https://github.com/getsentry/sentry.git | 1 | 76 | 0 | 26 | 122 | Python | {
"docstring": "Test a user (not team) installation where the user has hidden their username from public view",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 15
} | def test_installed_without_username(self):
# Remove username to simulate privacy mode
del self.user_data_from_bitbucket["principal"]["username"]
response = self.client.post(self.path, data=self.user_data_from_bitbucket)
assert response.status_code == 200
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
assert integration.name == self.user_display_name
assert integration.metadata == self.user_metadata
|
|
21,273 | 101,891 | 29 | lib/gui/display.py | 8 | 5 | def _command_display(self, command):
| Typing - lib.gui.display_command | _command_display | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | display.py | 10 | 3 | https://github.com/deepfakes/faceswap.git | 1 | 20 | 0 | 8 | 39 | Python | {
"docstring": " Build the relevant command specific tabs based on the incoming Faceswap command.\n\n Parameters\n ----------\n command: str\n The Faceswap command that is being executed\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 23,
"vocab_size": 20
} | def _command_display(self, command):
build_tabs = getattr(self, f"_{command}_tabs")
build_tabs()
|
|
13,532 | 63,924 | 49 | .venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py | 13 | 7 | def itermerged(self):
for key in s | upd; format | itermerged | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | _collections.py | 13 | 4 | https://github.com/jindongwang/transferlearning.git | 2 | 39 | 0 | 13 | 66 | Python | {
"docstring": "Iterate over all headers, merging duplicate ones together.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def itermerged(self):
for key in self:
val = self._container[key.lower()]
yield val[0], ", ".join(val[1:])
|
|
57,167 | 224,020 | 19 | mkdocs/structure/files.py | 5 | 7 | def get_file_from_path(self, path):
return self.src_paths.get(os.path.normpath(path))
| Remove spaces at the ends of docstrings, normalize quotes | get_file_from_path | e7f07cc82ab2be920ab426ba07456d8b2592714d | mkdocs | files.py | 10 | 2 | https://github.com/mkdocs/mkdocs.git | 1 | 24 | 0 | 5 | 40 | Python | {
"docstring": "Return a File instance with File.src_path equal to path.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def get_file_from_path(self, path):
return self.src_paths.get(os.path.normpath(path))
|
|
@pytest.fixture | 52,203 | 208,104 | 56 | t/unit/conftest.py | 23 | 11 | def sleepdeprived(request):
module = request.node.get_closest_marker(
"sleepdeprived_patched_module").args[0]
old_sleep, module.sleep = module.sleep, noop
try:
yield
finally:
module.sleep = old_sleep
| Canvas Header Stamping (#7384)
* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with groups.
* Successfully pass single group to result.
* _freeze_gid dict merge fixed
* First draft of the visitor API.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* OptionsVisitor created
* Fixed canvas.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test for simple test for chord and fixed chord implementation
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Fixed lint and elements
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* lint fixed
* Added integration test for chord
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* type -> isinstance
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Redo header stamping (#7341)
* _freeze_gid dict merge fixed
* OptionsVisitor created
* Fixed canvas.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test for simple test for chord and fixed chord implementation
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Fixed lint and elements
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* lint fixed
* Added integration test for chord
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* type -> isinstance
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Omer Katz <[email protected]>
* Added stamping mechanism
* Manual stamping improved
* flake8 fixed
* Added subtests
* Add comma.
* Moved groups to stamps
* Fixed chord and added test for that
* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with groups.
* Successfully pass single group to result.
* _freeze_gid dict merge fixed
* First draft of the visitor API.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* OptionsVisitor created
* Fixed canvas.py
* Added test for simple test for chord and fixed chord implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* Fixed lint and elements
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* lint fixed
* Added integration test for chord
* type -> isinstance
* Added stamping mechanism
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Manual stamping improved
* fail_ci_if_error uncommented
* flake8 fixed
* Added subtests
* Changes
* Add comma.
* Fixed chord and added test for that
* canvas.py fixed
* Test chord.py fixed
* Fixed stamped_headers
* collections import fixed
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* collections import fixed
* Update celery/backends/base.py
Co-authored-by: Omer Katz <[email protected]>
* ampq.py fixed
* Refrain from using deprecated import path.
* Fix test_complex_chain regression.
Whenever we stamp a group we need to freeze it first if it wasn't already frozen.
Somewhere along the line, the group id changed because we were freezing twice.
This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.
We don't know why yet.
* Fixed integration tests
* Fixed integration tests
* Fixed integration tests
* Fixed integration tests
* Fixed issues with maybe_list. Add documentation
* Fixed potential issue with integration tests
* Fixed issues with _regen
* Fixed issues with _regen
* Fixed test_generator issues
* Fixed _regen stamping
* Fixed _regen stamping
* Fixed TimeOut issue
* Fixed TimeOut issue
* Fixed TimeOut issue
* Update docs/userguide/canvas.rst
Co-authored-by: Omer Katz <[email protected]>
* Fixed Couchbase
* Better stamping intro
* New GroupVisitor example
* Adjust documentation.
Co-authored-by: Naomi Elstein <[email protected]>
Co-authored-by: Omer Katz <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Asif Saif Uddin <[email protected]>
Co-authored-by: Omer Katz <[email protected]> | sleepdeprived | 1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc | celery | conftest.py | 11 | 8 | https://github.com/celery/celery.git | 2 | 42 | 1 | 19 | 83 | Python | {
"docstring": "Mock sleep method in patched module to do nothing.\n\n Example:\n >>> import time\n >>> @pytest.mark.sleepdeprived_patched_module(time)\n >>> def test_foo(self, sleepdeprived):\n >>> pass\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 21,
"vocab_size": 18
} | def sleepdeprived(request):
module = request.node.get_closest_marker(
"sleepdeprived_patched_module").args[0]
old_sleep, module.sleep = module.sleep, noop
try:
yield
finally:
module.sleep = old_sleep
# Taken from
# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py
@pytest.fixture |
20,985 | 101,575 | 112 | lib/training/preview_tk.py | 30 | 11 | def _set_mouse_bindings(self) -> None:
logger.debug("Binding mouse events")
if system() == "Linux":
self._canvas.tag_bind(self._canvas.image_id, "<Button-4>", self._on_bound_zoom)
self._canvas.tag_bind | Training - Use custom preview pop-out | _set_mouse_bindings | 7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5 | faceswap | preview_tk.py | 12 | 15 | https://github.com/deepfakes/faceswap.git | 2 | 119 | 0 | 22 | 198 | Python | {
"docstring": " Set the mouse bindings for interacting with the preview image\n\n Mousewheel: Zoom in and out\n Mouse click: Move image\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 17
} | def _set_mouse_bindings(self) -> None:
logger.debug("Binding mouse events")
if system() == "Linux":
self._canvas.tag_bind(self._canvas.image_id, "<Button-4>", self._on_bound_zoom)
self._canvas.tag_bind(self._canvas.image_id, "<Button-5>", self._on_bound_zoom)
else:
self._canvas.tag_bind(self._canvas.image_id, "<MouseWheel>", self._on_bound_zoom)
self._canvas.tag_bind(self._canvas.image_id, "<Button-1>", self._on_mouse_click)
self._canvas.tag_bind(self._canvas.image_id, "<B1-Motion>", self._on_mouse_drag)
logger.debug("Bound mouse events")
|
|
96,701 | 297,739 | 40 | tests/helpers/test_area_registry.py | 22 | 9 | async def test_create_area_with_id_already_in_use(registry):
| Add aliases to area registry items (#84294)
* Add aliases to area registry items
* Update test
* Fix WS API | test_create_area_with_id_already_in_use | 1a42bd5c4cb51ffbfcaf8d5389b80a228712ac81 | core | test_area_registry.py | 10 | 6 | https://github.com/home-assistant/core.git | 1 | 50 | 0 | 17 | 90 | Python | {
"docstring": "Make sure that we can't create an area with a name already in use.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | async def test_create_area_with_id_already_in_use(registry):
area1 = registry.async_create("mock")
updated_area1 = registry.async_update(area1.id, name="New Name")
assert updated_area1.id == area1.id
area2 = registry.async_create("mock")
assert area2.id == "mock_2"
|
|
10,074 | 50,265 | 147 | modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/ernie_modeling.py | 43 | 20 | def forward(self, *args, **kwargs):
labels = kwargs.pop('label | add disco_diffusion_ernievil_base | forward | ffcde21305c61d950a9f93e57e6180c9a9665b87 | PaddleHub | ernie_modeling.py | 12 | 12 | https://github.com/PaddlePaddle/PaddleHub.git | 3 | 99 | 0 | 32 | 160 | Python | {
"docstring": "\n Args:\n labels (optional, `Variable` of shape [batch_size]):\n ground truth label id for each sentence\n Returns:\n loss (`Variable` of shape []):\n Cross entropy loss mean over batch\n if labels not set, returns None\n logits (`Variable` of shape [batch_size, hidden_size]):\n output logits of classifier\n ",
"language": "en",
"n_whitespaces": 157,
"n_words": 42,
"vocab_size": 33
} | def forward(self, *args, **kwargs):
labels = kwargs.pop('labels', None)
pooled, encoded = super(ErnieModelForSequenceClassification, self).forward(*args, **kwargs)
hidden = self.dropout(pooled)
logits = self.classifier(hidden)
if labels is not None:
if len(labels.shape) != 1:
labels = labels.squeeze()
loss = F.cross_entropy(logits, labels)
else:
loss = None
return loss, logits
|
|
12,832 | 62,023 | 23 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py | 9 | 4 | def _get_project(self, name):
raise NotImplemen | upd; format | _get_project | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | locators.py | 8 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 13 | 0 | 9 | 25 | Python | {
"docstring": "\n For a given project, get a dictionary mapping available versions to Distribution\n instances.\n\n This should be implemented in subclasses.\n\n If called from a locate() request, self.matcher will be set to a\n matcher for the requirement to satisfy, otherwise it will be None.\n ",
"language": "en",
"n_whitespaces": 85,
"n_words": 42,
"vocab_size": 34
} | def _get_project(self, name):
raise NotImplementedError('Please implement in the subclass')
|
|
38,470 | 160,031 | 51 | numpy/core/tests/test_multiarray.py | 16 | 12 | def test_pickle_empty(self):
arr = np.array([]).reshape(999999, 0)
pk_dmp = pickle.dumps(arr)
pk_load = pickle.loads(pk_dmp)
assert pk_load.size == 0
| BUG: Fix unpickling an empty ndarray with a none-zero dimension (#21067)
Changing num to the number of bytes in the input array, PyArray_NBYTES(self). Solves #21009.
* Fixing nbyte size in methods.c:memcpy
* Adding a test
* Re-adding removed newline
* Shrinking the test array to save memory | test_pickle_empty | 935fe83ddaa3250d176bc848579ffdc4e1017090 | numpy | test_multiarray.py | 11 | 5 | https://github.com/numpy/numpy.git | 1 | 44 | 0 | 14 | 73 | Python | {
"docstring": "Checking if an empty array pickled and un-pickled will not cause a\n segmentation fault",
"language": "en",
"n_whitespaces": 20,
"n_words": 14,
"vocab_size": 14
} | def test_pickle_empty(self):
arr = np.array([]).reshape(999999, 0)
pk_dmp = pickle.dumps(arr)
pk_load = pickle.loads(pk_dmp)
assert pk_load.size == 0
|
|
7,461 | 42,022 | 125 | seaborn/_oldcore.py | 46 | 8 | def get_semantics(cls, kwargs, semantics=None):
# TODO this should be get_variables since we have included x and y
if semantics is None:
semantics = | docs: fix typos (#2899)
* Small typo fixes
* Catch an additional typo
Co-authored-by: Michael Waskom <[email protected]> | get_semantics | 5910d6ef50196c8bd1f4ed40a5da202a39d7f62c | seaborn | _oldcore.py | 11 | 8 | https://github.com/mwaskom/seaborn.git | 5 | 55 | 0 | 34 | 88 | Python | {
"docstring": "Subset a dictionary arguments with known semantic variables.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def get_semantics(cls, kwargs, semantics=None):
# TODO this should be get_variables since we have included x and y
if semantics is None:
semantics = cls.semantics
variables = {}
for key, val in kwargs.items():
if key in semantics and val is not None:
variables[key] = val
return variables
|
|
2,995 | 19,485 | 176 | pipenv/utils/dependencies.py | 72 | 37 | def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
from pipenv.vendor.requirementslib.models.requirements import Requirement
dependencies = []
for dep_name, dep in deps.items():
if project:
project.clear_pipfile_cache()
indexes = getattr(project, "pipfile_sources", []) if project is not None else []
new_dep = Requirement.from_pipfile(dep_name, dep)
if new_dep.index:
include_index = True
req = new_dep.as_line(sources=indexes if include_index else None).strip()
dependencies.append(req)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
from pipenv.vendor.vistir.path import create_tracked_tempfile
f = create_tracked_tempfile(suffix="-requirements.txt", delete=Fa | Code reorg utils into utils module reduces complexity (#4990)
* Split apart the massive utils.py into a utils module | convert_deps_to_pip | 3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8 | pipenv | dependencies.py | 14 | 19 | https://github.com/pypa/pipenv.git | 7 | 167 | 0 | 55 | 266 | Python | {
"docstring": "\"Converts a Pipfile-formatted dependency to a pip-formatted one.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
from pipenv.vendor.requirementslib.models.requirements import Requirement
dependencies = []
for dep_name, dep in deps.items():
if project:
project.clear_pipfile_cache()
indexes = getattr(project, "pipfile_sources", []) if project is not None else []
new_dep = Requirement.from_pipfile(dep_name, dep)
if new_dep.index:
include_index = True
req = new_dep.as_line(sources=indexes if include_index else None).strip()
dependencies.append(req)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
from pipenv.vendor.vistir.path import create_tracked_tempfile
f = create_tracked_tempfile(suffix="-requirements.txt", delete=False)
f.write("\n".join(dependencies).encode("utf-8"))
f.close()
return f.name
|
|
24,919 | 113,475 | 93 | nni/algorithms/hpo/hyperband_advisor.py | 25 | 9 | def handle_trial_end(self, data):
hyper_params = nni.load(data['hyper_params'])
if self.is_created_in_previous_exp(hyper_params['parameter_id']):
# The end of the recovered trial is ignored
return
self._handle_trial_end(hyper_params['parameter_id'])
if data['trial_job_id'] | [nas] fix issue introduced by the trial recovery feature (#5109) | handle_trial_end | bcc640c4e5e687a03fe21503692dad96e0b97fa7 | nni | hyperband_advisor.py | 10 | 7 | https://github.com/microsoft/nni.git | 3 | 60 | 0 | 24 | 105 | Python | {
"docstring": "\n Parameters\n ----------\n data: dict()\n it has three keys: trial_job_id, event, hyper_params\n trial_job_id: the id generated by training service\n event: the job's state\n hyper_params: the hyperparameters (a string) generated and returned by tuner\n ",
"language": "en",
"n_whitespaces": 105,
"n_words": 32,
"vocab_size": 28
} | def handle_trial_end(self, data):
hyper_params = nni.load(data['hyper_params'])
if self.is_created_in_previous_exp(hyper_params['parameter_id']):
# The end of the recovered trial is ignored
return
self._handle_trial_end(hyper_params['parameter_id'])
if data['trial_job_id'] in self.job_id_para_id_map:
del self.job_id_para_id_map[data['trial_job_id']]
|
|
56,021 | 220,508 | 115 | python3.10.4/Lib/asyncio/futures.py | 29 | 11 | def _copy_future_state(source, dest):
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(_convert_future_exc(exception))
else:
result = source.result()
dest.set_resul | add python 3.10.4 for windows | _copy_future_state | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | futures.py | 14 | 14 | https://github.com/XX-net/XX-Net.git | 4 | 80 | 0 | 22 | 138 | Python | {
"docstring": "Internal helper to copy state from another Future.\n\n The other Future may be a concurrent.futures.Future.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 15,
"vocab_size": 15
} | def _copy_future_state(source, dest):
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(_convert_future_exc(exception))
else:
result = source.result()
dest.set_result(result)
|
|
76,648 | 261,047 | 80 | sklearn/utils/tests/test_validation.py | 41 | 16 | def test_get_feature_names_invalid_dtypes(names, dtypes):
pd = | MAINT Clean deprecation for 1.2: validation (#24493)
* cln deprecations
* cln
* fix tst switch to pytest.raises | test_get_feature_names_invalid_dtypes | 9f9f1684e91fbfffbc446f786a8c64628b752efb | scikit-learn | test_validation.py | 11 | 9 | https://github.com/scikit-learn/scikit-learn.git | 1 | 74 | 0 | 34 | 123 | Python | {
"docstring": "Get feature names errors when the feature names have mixed dtypes",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 9
} | def test_get_feature_names_invalid_dtypes(names, dtypes):
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names)
msg = re.escape(
"Feature names only support names that are all strings. "
f"Got feature names with dtypes: {dtypes}."
)
with pytest.raises(TypeError, match=msg):
names = _get_feature_names(X)
|
|
73,739 | 251,435 | 417 | mitmproxy/platform/pf.py | 133 | 17 | def lookup(address, port, s):
# We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
# Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
address = re.sub(r"^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
# ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED
specv4 = f"{address}:{port}"
# ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED
specv6 = f"{address}[{port}]"
for i in s.split("\n"):
if "ESTABLISHED:ESTABLISHED" in i and specv4 in i:
s = i.split()
if len(s) > 4:
if sys.platform.startswith("freebsd"):
# strip parentheses for FreeBSD pfctl
s = s[3][1:-1].split(":")
else:
s = s[4].split(":")
if len(s) == 2:
return s[0], int(s[1])
elif "ESTABLISHED:ESTABLISHED" in i and specv6 in i:
s = i.split()
if len(s) > 4:
s = s[4].split("[")
port = s[1].split("]")
port = port[0]
return s[0], int(port)
raise RuntimeError("Could not resolve original | make it black! | lookup | b3587b52b25077f68116b9852b041d33e7fc6601 | mitmproxy | pf.py | 19 | 23 | https://github.com/mitmproxy/mitmproxy.git | 10 | 200 | 0 | 82 | 358 | Python | {
"docstring": "\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 24,
"vocab_size": 21
} | def lookup(address, port, s):
# We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
# Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
address = re.sub(r"^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
# ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED
specv4 = f"{address}:{port}"
# ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED
specv6 = f"{address}[{port}]"
for i in s.split("\n"):
if "ESTABLISHED:ESTABLISHED" in i and specv4 in i:
s = i.split()
if len(s) > 4:
if sys.platform.startswith("freebsd"):
# strip parentheses for FreeBSD pfctl
s = s[3][1:-1].split(":")
else:
s = s[4].split(":")
if len(s) == 2:
return s[0], int(s[1])
elif "ESTABLISHED:ESTABLISHED" in i and specv6 in i:
s = i.split()
if len(s) > 4:
s = s[4].split("[")
port = s[1].split("]")
port = port[0]
return s[0], int(port)
raise RuntimeError("Could not resolve original destination.")
|
|
18,185 | 86,903 | 155 | src/sentry/models/projectownership.py | 51 | 19 | def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value):
owners = [owner for rule in r | feat(commit-context): Refactor Issue Owner auto-assignment (#40048)
## Objective:
This PR refactors how we calculate the Issue Owners from Code
Owners/Ownership Rules and who should get the auto-assignment. Auto
Assignment will first go to the Suspect Committer (if it exists and the
setting is on) then to Issue Owners (if it exists and the setting is on)
then nothing. We will also store the rule that triggered the Issue Owner
match in GroupOwner. | _hydrate_rules | 712ba34a4d51be636396e70557aa3f99471814be | sentry | projectownership.py | 14 | 12 | https://github.com/getsentry/sentry.git | 8 | 96 | 0 | 32 | 139 | Python | {
"docstring": "\n Get the last matching rule to take the most precedence.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 9
} | def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value):
owners = [owner for rule in rules for owner in rule.owners]
actors = {
key: val
for key, val in resolve_actors({owner for owner in owners}, project_id).items()
if val
}
result = [
(rule, ActorTuple.resolve_many([actors[owner] for owner in rule.owners]), type)
for rule in rules
]
return result
|
|
11,603 | 56,999 | 60 | src/prefect/blocks/kubernetes.py | 10 | 8 | def activate(self) -> str:
load_kube_config_from_dict(
config_dict=s | add test coerage for get_api_client and activate | activate | 8f3ffd09dc47bfd2af6a635cc04c640febffd519 | prefect | kubernetes.py | 9 | 11 | https://github.com/PrefectHQ/prefect.git | 1 | 29 | 0 | 10 | 48 | Python | {
"docstring": "\n Convenience method for activating the k8s config stored in an instance of this block\n\n Returns current_context for sanity check\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 18
} | def activate(self) -> str:
load_kube_config_from_dict(
config_dict=self.config,
context=self.context,
)
return self.current_context()
|
|
48,731 | 197,875 | 65 | sympy/core/expr.py | 18 | 8 | def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:
| add some type hints to expr.py | as_coeff_add | 675e6d6ca7aa63ce26f8aa0ca2467976b6570113 | sympy | expr.py | 12 | 35 | https://github.com/sympy/sympy.git | 3 | 49 | 0 | 15 | 77 | Python | {
"docstring": "Return the tuple (c, args) where self is written as an Add, ``a``.\n\n c should be a Rational added to any terms of the Add that are\n independent of deps.\n\n args should be a tuple of all other terms of ``a``; args is empty\n if self is a Number or if self is independent of deps (when given).\n\n This should be used when you do not know if self is an Add or not but\n you want to treat self as an Add or if you want to process the\n individual arguments of the tail of self as an Add.\n\n - if you know self is an Add and want only the head, use self.args[0];\n - if you do not want to process the arguments of the tail but need the\n tail then use self.as_two_terms() which gives the head and tail.\n - if you want to split self into an independent and dependent parts\n use ``self.as_independent(*deps)``\n\n >>> from sympy import S\n >>> from sympy.abc import x, y\n >>> (S(3)).as_coeff_add()\n (3, ())\n >>> (3 + x).as_coeff_add()\n (3, (x,))\n >>> (3 + x + y).as_coeff_add(x)\n (y + 3, (x,))\n >>> (3 + y).as_coeff_add(x)\n (y + 3, ())\n\n ",
"language": "en",
"n_whitespaces": 360,
"n_words": 195,
"vocab_size": 91
} | def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:
if deps:
if not self.has_free(*deps):
return self, tuple()
return S.Zero, (self,)
|
|
5,414 | 30,229 | 277 | spotdl/console/web.py | 112 | 19 | def create_github_url(url):
repo_only_url = re.compile(
r"https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$"
)
re_branch = re.compile("/(tree|blob)/(.+?)/")
# Check if the given url is a url to a GitHub repo. If it is, tell the
# user | update web code
Co-Authored-By: Peyton Creery <[email protected]> | create_github_url | bbb7a02ef889134af71593102bc6f65035ab14cb | spotify-downloader | web.py | 19 | 23 | https://github.com/spotDL/spotify-downloader.git | 3 | 111 | 0 | 71 | 198 | Python | {
"docstring": "\n From the given url, produce a URL that is compatible with Github's REST API. Can handle blob or tree paths.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 20,
"vocab_size": 20
} | def create_github_url(url):
repo_only_url = re.compile(
r"https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$"
)
re_branch = re.compile("/(tree|blob)/(.+?)/")
# Check if the given url is a url to a GitHub repo. If it is, tell the
# user to use 'git clone' to download it
if re.match(repo_only_url, url):
print(
"✘ The given url is a complete repository. Use 'git clone' to download the repository",
"red",
)
sys.exit()
# extract the branch name from the given url (e.g master)
branch = re_branch.search(url)
if branch:
download_dirs = url[branch.end() :]
api_url = (
url[: branch.start()].replace("github.com", "api.github.com/repos", 1)
+ "/contents/"
+ download_dirs
+ "?ref="
+ branch.group(2)
)
return api_url, download_dirs
raise ValueError("The given url is not a valid GitHub url")
# Modification of https://github.com/sdushantha/gitdir/blob/master/gitdir/gitdir.py |
|
75,819 | 259,555 | 12 | sklearn/metrics/cluster/_supervised.py | 6 | 4 | def homogeneity_score(labels_true, labels_pred):
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
| DOC Ensures that homogeneity_score passes numpydoc validation (#23006) | homogeneity_score | 4253eace9893eb6aef36ca631e7978b6a8808fbc | scikit-learn | _supervised.py | 8 | 2 | https://github.com/scikit-learn/scikit-learn.git | 1 | 18 | 0 | 6 | 29 | Python | {
"docstring": "Homogeneity metric of a cluster labeling given a ground truth.\n\n A clustering result satisfies homogeneity if all of its clusters\n contain only data points which are members of a single class.\n\n This metric is independent of the absolute values of the labels:\n a permutation of the class or cluster label values won't change the\n score value in any way.\n\n This metric is not symmetric: switching ``label_true`` with ``label_pred``\n will return the :func:`completeness_score` which will be different in\n general.\n\n Read more in the :ref:`User Guide <homogeneity_completeness>`.\n\n Parameters\n ----------\n labels_true : int array, shape = [n_samples]\n Ground truth class labels to be used as a reference.\n\n labels_pred : array-like of shape (n_samples,)\n Cluster labels to evaluate.\n\n Returns\n -------\n homogeneity : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.\n\n See Also\n --------\n completeness_score : Completeness metric of cluster labeling.\n v_measure_score : V-Measure (NMI with arithmetic mean option).\n\n References\n ----------\n\n .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. 
V-Measure: A\n conditional entropy-based external cluster evaluation measure\n <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_\n\n Examples\n --------\n\n Perfect labelings are homogeneous::\n\n >>> from sklearn.metrics.cluster import homogeneity_score\n >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])\n 1.0\n\n Non-perfect labelings that further split classes into more clusters can be\n perfectly homogeneous::\n\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))\n 1.000000\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))\n 1.000000\n\n Clusters that include samples from different classes do not make for an\n homogeneous labeling::\n\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))\n 0.0...\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))\n 0.0...\n ",
"language": "en",
"n_whitespaces": 443,
"n_words": 263,
"vocab_size": 162
} | def homogeneity_score(labels_true, labels_pred):
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
|
|
49,376 | 199,720 | 62 | sympy/polys/orthopolys.py | 33 | 12 | def dup_chebyshevt(n, K):
if n | Restore domain elements in dup_* functions | dup_chebyshevt | 3d30d00c37371f142e6a0e9dc5058752d8c9d401 | sympy | orthopolys.py | 15 | 7 | https://github.com/sympy/sympy.git | 3 | 83 | 0 | 26 | 123 | Python | {
"docstring": "Low-level implementation of Chebyshev polynomials of the first kind.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def dup_chebyshevt(n, K):
if n < 1:
return [K.one]
m2, m1 = [K.one], [K.one, K.zero]
for i in range(2, n+1):
m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K)
return m1
|
|
57,190 | 224,043 | 20 | mkdocs/tests/base.py | 8 | 8 | def get_markdown_toc(markdown_source):
md = markdown.Markdown(extensions=['toc | Remove spaces at the ends of docstrings, normalize quotes | get_markdown_toc | e7f07cc82ab2be920ab426ba07456d8b2592714d | mkdocs | base.py | 11 | 4 | https://github.com/mkdocs/mkdocs.git | 1 | 28 | 0 | 8 | 50 | Python | {
"docstring": "Return TOC generated by Markdown parser from Markdown source text.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def get_markdown_toc(markdown_source):
md = markdown.Markdown(extensions=['toc'])
md.convert(markdown_source)
return md.toc_tokens
|
|
16,673 | 77,547 | 53 | wagtail/admin/widgets/chooser.py | 10 | 6 | def get_value_data_from_instance(self, instance):
| Split out common logic from get_value_data | get_value_data_from_instance | 39f7886a6f8ee98db7e73ce33d94c06139f35bd8 | wagtail | chooser.py | 11 | 5 | https://github.com/wagtail/wagtail.git | 1 | 28 | 0 | 10 | 49 | Python | {
"docstring": "\n Given a model instance, return a value that we can pass to both the server-side template\n and the client-side rendering code (via telepath) that contains all the information needed\n for display. Typically this is a dict of id, title etc; it must be JSON-serialisable.\n ",
"language": "en",
"n_whitespaces": 73,
"n_words": 44,
"vocab_size": 39
} | def get_value_data_from_instance(self, instance):
return {
"id": instance.pk,
"edit_url": AdminURLFinder().get_edit_url(instance),
}
|
|
53,473 | 212,865 | 10,839 | PySimpleGUI.py | 4,824 | 131 | def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),
margins=(None, None),
element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,
slider_border_width=None, slider_relief=None, slider_orientation=None,
autoclose_time=None, message_box_line_width=None,
progress_meter_border_depth=None, progress_meter_style=None,
progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None,
text_justification=None, background_color=None, element_background_color=None,
text_element_background_color=None, input_elements_background_color=None, input_text_color=None,
scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None),
window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None,
suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None,
enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None,
titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)):
global DEFAULT_ELEMENT_SIZE
global DEFAULT_BUTTON_ELEMENT_SIZE
global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term
global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels
global DEFAULT_AUTOSIZE_TEXT
global DEFAULT_AUTOSIZE_BUTTONS
global DEFAULT_FONT
global DEFAULT_BORDER_WIDTH
global DEFAULT_AUTOCLOSE_TIME
global DEFAULT_BUTTON_COLOR
global MESSAGE_BOX_LINE_WIDTH
global DEFAULT_PROGRESS_BAR_BORDER_WIDTH
global DEFAULT_PROGRESS_BAR_STYLE
global DEFAULT_PROGRESS_BAR_RELIEF
global DEFAULT_PROGRESS_BAR_COLOR
global DEFAULT_PROGRESS_BAR_SIZE
global DEFAULT_TEXT_JUSTIFICATION
global DEFAULT_DEBUG_WINDOW_SIZE
global DEFAULT_SLIDER_BORDER_WIDTH
global DEFAULT_SLIDER_RELIEF
global DEFAULT_SLIDER_ORIENTATION
global DEFAULT_BACKGROUND_COLOR
global DEFAULT_INPUT_ELEMENTS_COLOR
global DEFAULT_ELEMENT_BACKGROUND_COLOR
global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR
global DEFAULT_SCROLLBAR_COLOR
global DEFAULT_TEXT_COLOR
global DEFAULT_WINDOW_LOCATION
global DEFAULT_ELEMENT_TEXT_COLOR
global DEFAULT_INPUT_TEXT_COLOR
global DEFAULT_TOOLTIP_TIME
global DEFAULT_ERROR_BUTTON_COLOR
global DEFAULT_TTK_THEME
global USE_TTK_BUTTONS
global TOOLTIP_FONT
global SUPPRESS_ERROR_POPUPS
global SUPPRESS_RAISE_KEY_ERRORS
global SUPPRESS_KEY_GUESSING
global WARN_DUPLICATE_BUTTON_KEY_ERRORS
global ENABLE_TREEVIEW_869_PATCH
global ENABLE_MAC_NOTITLEBAR_PATCH
global USE_CUSTOM_TITLEBAR
global CUSTOM_TITLEBAR_BACKGROUND_COLOR
global CUSTOM_TITLEBAR_TEXT_COLOR
global CUSTOM_TITLEBAR_ICON
global CUSTOM_TITLEBAR_FONT
global DEFAULT_USER_SETTINGS_PATH
global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH
global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME
global DEFAULT_KEEP_ON_TOP
global DEFAULT_SCALING
global DEFAULT_MODAL_WINDOWS_ENABLED
global DEFAULT_TOOLTIP_OFFSET
global _pysimplegui_user_settings
# global _my_windows
if icon:
Window._user_defined_icon = icon
# _my_windows._user_defined_icon = icon
if button_color != None:
if button_color == COLOR_SYSTEM_DEFAULT:
DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT)
else:
DEFAULT_BUTTON_COLOR = button_color
if element_size != (None, None):
DEFAULT_ELEMENT_SIZE = element_size
if button_element_size != (None, None):
DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size
if margins != (None, None):
DEFAULT_MARGINS = margins
if element_padding != (None, None):
DEFAULT_ELEMENT_PADDING = element_padding
if auto_size_text != None:
DEFAULT_AUTOSIZE_TEXT = auto_size_text
if auto_size_buttons != None:
DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons
if font != None:
DEFAULT_FONT = font
if border_width != None:
DEFAULT_BORDER_WIDTH = border_width
if autoclose_time != None:
DEFAULT_AUTOCLOSE_TIME = autoclose_time
if message_box_line_width != None:
MESSAGE_BOX_LINE_WIDTH = message_box_line_width
if progress_meter_border_depth != None:
DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth
if progress_meter_style != None:
warnings.warn('You can no longer set a progress bar style. All ttk styles must be the same for the window', UserWarning)
# DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style
if progress_meter_relief != None:
DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief
if progress_meter_color != None:
DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color
if progress_meter_size != None:
DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size
if slider_border_width != None:
DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width
if slider_orientation != None:
DEFAULT_SLIDER_ORIENTATION = slider_orientation
if slider_relief != None:
DEFAULT_SLIDER_RELIEF = slider_relief
if text_justification != None:
DEFAULT_TEXT_JUSTIFICATION = text_justification
if background_color != None:
DEFAULT_BACKGROUND_COLOR = background_color
if text_element_background_color != None:
DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color
if input_elements_background_color != None:
DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color
if element_background_color != None:
DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color
if window_location != (None, None):
DEFAULT_WINDOW_LOCATION = window_location
if debug_win_size != (None, None):
DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size
if text_color != None:
DEFAULT_TEXT_COLOR = text_color
if scrollbar_color != None:
DEFAULT_SCROLLBAR_COLOR = scrollbar_color
if element_text_color != None:
DEFAULT_ELEMENT_TEXT_COLOR = element_text_color
if input_text_color is not None:
DEFAULT_INPUT_TEXT_COLOR = input_text_color
if tooltip_time is not None:
DEFAULT_TOOLTIP_TIME = tooltip_time
if error_button_color != (None, None):
DEFAULT_ERROR_BUTTON_COLOR = error_button_color
if ttk_theme is not None:
DEFAULT_TTK_THEME = ttk_theme
if use_ttk_buttons is not None:
USE_TTK_BUTTONS = use_ttk_buttons
if tooltip_font is not None:
TOOLTIP_FONT = tooltip_font
if suppress_error_popups is not None:
SUPPRESS_ERROR_POPUPS = suppress_error_popups
if suppress_raise_key_errors is not None:
SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors
if suppress_key_guessing is not None:
SUPPRESS_KEY_GUESSING = suppress_key_guessing
if warn_button_key_duplicates is not None:
WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates
if enable_treeview_869_patch is not None:
ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch
if enable_mac_notitlebar_patch is not None:
ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch
if use_custom_titlebar is not None:
USE_CUSTOM_TITLEBAR = use_custom_titlebar
if titlebar_background_color is not None:
CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color
if titlebar_text_color is not None:
CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color
if titlebar_font is not None:
CUSTOM_TITLEBAR_FONT = titlebar_font
if titlebar_icon is not None:
CUSTOM_TITLEBAR_ICON = titlebar_icon
if user_settings_path is not None:
DEFAULT_USER_SETTINGS_PATH = user_settings_path
if pysimplegui_settings_path is not None:
DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path
if pysimplegui_settings_filename is not None:
DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename
if pysimplegui_settings_filename is not None or pysimplegui_settings_filename is not None:
_pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME,
path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH)
if keep_on_top is not None:
DEFAULT_KEEP_ON_TOP = keep_on_top
if dpi_awareness is True:
if running_windows():
if platform.release() == "7":
ctypes.windll.user32.SetProcessDPIAware()
elif platform.release() == "8" or platform.release() == "10":
ctypes.windll.shcore.SetProcessDpiAwareness(1)
if scaling is not None:
DEFAULT_SCALING = scaling
if disable_modal_windows is not None:
DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows
if tooltip_offset != (None, None):
DEFAULT_TOOLTIP_OFFSET = tooltip_offset
return True
# ----------------------------------------------------------------- #
# .########.##.....##.########.##.....##.########..######.
# ....##....##.....##.##.......###...###.##.......##....##
# ....##....##.....##.##.......####.####.##.......##......
# ....##....#########.######...##.###.##.######....######.
# ....##....##.....##.##.......##.....##.##.............##
# ....##....##.....##.##.......##.....##.##.......##....##
# ....##....##.....##.########.##.....##.########..######.
# ----------------------------------------------------------------- #
# The official Theme code
#################### ChangeLookAndFeel #######################
# Predefined settings that will change the colors and styles #
# of the elements. #
##############################################################
LOOK_AND_FEEL_TABLE = {
"SystemDefault": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1,
"SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
"SystemDefaultForReal": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT,
"TEXT_INPUT": COLOR_SYSTEM_DEFAULT, "SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": COLOR_SYSTEM_DEFAULT,
"PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
"SystemDefault1": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": COLOR_SYSTEM_DEFAULT, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1,
"PROGRESS_DEPTH": 0, },
"Material1": {"BACKGROUND": "#E3F2FD", "TEXT": "#000000", "INPUT": "#86A8FF", "TEXT_INPUT": "#000000", "SCROLL": "#86A8FF",
"BUTTON": ("#FFFFFF", "#5079D3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 0, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#FF0266", "ACCENT2": "#FF5C93", "ACCENT3": "#C5003C", },
"Material2": {"BACKGROUND": "#FAFAFA", "TEXT": "#000000", "INPUT": "#004EA1", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#5EA7FF",
"BUTTON": ("#FFFFFF", "#0079D3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 0, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#FF0266", "ACCENT2": "#FF5C93", "ACCENT3": "#C5003C", },
"Reddit": {"BACKGROUND": "#ffffff", "TEXT": "#1a1a1b", "INPUT": "#dae0e6", "TEXT_INPUT": "#222222", "SCROLL": "#a5a4a4", "BUTTON": ("#FFFFFF", "#0079d3"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, "ACCENT1": "#ff5414", "ACCENT2": "#33a8ff",
"ACCENT3": "#dbf0ff", },
"Topanga": {"BACKGROUND": "#282923", "TEXT": "#E7DB74", "INPUT": "#393a32", "TEXT_INPUT": "#E7C855", "SCROLL": "#E7C855", "BUTTON": ("#E7C855", "#284B5A"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, "ACCENT1": "#c15226", "ACCENT2": "#7a4d5f",
"ACCENT3": "#889743", },
"GreenTan": {"BACKGROUND": "#9FB8AD", "TEXT": '#000000', "INPUT": "#F7F3EC", "TEXT_INPUT": "#000000", "SCROLL": "#F7F3EC", "BUTTON": ("#FFFFFF", "#475841"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Dark": {"BACKGROUND": "#404040", "TEXT": "#FFFFFF", "INPUT": "#4D4D4D", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#707070", "BUTTON": ("#FFFFFF", "#004F00"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightGreen": {"BACKGROUND": "#B7CECE", "TEXT": "#000000", "INPUT": "#FDFFF7", "TEXT_INPUT": "#000000", "SCROLL": "#FDFFF7",
"BUTTON": ("#FFFFFF", "#658268"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "ACCENT1": "#76506d",
"ACCENT2": "#5148f1", "ACCENT3": "#0a1c84", "PROGRESS_DEPTH": 0, },
"Dark2": {"BACKGROUND": "#404040", "TEXT": "#FFFFFF", "INPUT": "#FFFFFF", "TEXT_INPUT": "#000000", "SCROLL": "#707070", "BUTTON": ("#FFFFFF", "#004F00"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Black": {"BACKGROUND": "#000000", "TEXT": "#FFFFFF", "INPUT": "#4D4D4D", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#707070", "BUTTON": ("#000000", "#FFFFFF"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Tan": {"BACKGROUND": "#fdf6e3", "TEXT": "#268bd1", "INPUT": "#eee8d5", "TEXT_INPUT": "#6c71c3", "SCROLL": "#eee8d5", "BUTTON": ("#FFFFFF", "#063542"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"TanBlue": {"BACKGROUND": "#e5dece", "TEXT": "#063289", "INPUT": "#f9f8f4", "TEXT_INPUT": "#242834", "SCROLL": "#eee8d5", "BUTTON": ("#FFFFFF", "#063289"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkTanBlue": {"BACKGROUND": "#242834", "TEXT": "#dfe6f8", "INPUT": "#97755c", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#a9afbb",
"BUTTON": ("#FFFFFF", "#063289"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkAmber": {"BACKGROUND": "#2c2825", "TEXT": "#fdcb52", "INPUT": "#705e52", "TEXT_INPUT": "#fdcb52", "SCROLL": "#705e52",
"BUTTON": ("#000000", "#fdcb52"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlue": {"BACKGROUND": "#1a2835", "TEXT": "#d1ecff", "INPUT": "#335267", "TEXT_INPUT": "#acc2d0", "SCROLL": "#1b6497", "BUTTON": ("#000000", "#fafaf8"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1 | Addition of tooltip_offset parm to set_options call (major hack to get around 8.6.12 problem). Backed out the experiments to try and fix new problem with Ubuntu | set_options | 07bb93d47f01468660a01f42150e87e5cb08d546 | PySimpleGUI | PySimpleGUI.py | 16 | 14 | https://github.com/PySimpleGUI/PySimpleGUI.git | 1 | 255 | 0 | 1,112 | 19,192 | Python | {
"docstring": "\n :param icon: Can be either a filename or Base64 value. For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's\n :type icon: bytes | str\n :param button_color: Color of the button (text, background)\n :type button_color: (str, str) or str\n :param element_size: element size (width, height) in characters\n :type element_size: (int, int)\n :param button_element_size: Size of button\n :type button_element_size: (int, int)\n :param margins: (left/right, top/bottom) tkinter margins around outsize. Amount of pixels to leave inside the window's frame around the edges before your elements are shown.\n :type margins: (int, int)\n :param element_padding: Default amount of padding to put around elements in window (left/right, top/bottom) or ((left, right), (top, bottom))\n :type element_padding: (int, int) or ((int, int),(int,int))\n :param auto_size_text: True if the Widget should be shrunk to exactly fit the number of chars to show\n :type auto_size_text: bool\n :param auto_size_buttons: True if Buttons in this Window should be sized to exactly fit the text on this.\n :type auto_size_buttons: (bool)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. 
Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param border_width: width of border around element\n :type border_width: (int)\n :param slider_border_width: Width of the border around sliders\n :type slider_border_width: (int)\n :param slider_relief: Type of relief to use for sliders\n :type slider_relief: (str)\n :param slider_orientation: ???\n :type slider_orientation: ???\n :param autoclose_time: ???\n :type autoclose_time: ???\n :param message_box_line_width: ???\n :type message_box_line_width: ???\n :param progress_meter_border_depth: ???\n :type progress_meter_border_depth: ???\n :param progress_meter_style: You can no longer set a progress bar style. All ttk styles must be the same for the window\n :type progress_meter_style: ???\n :param progress_meter_relief:\n :type progress_meter_relief: ???\n :param progress_meter_color: ???\n :type progress_meter_color: ???\n :param progress_meter_size: ???\n :type progress_meter_size: ???\n :param text_justification: Default text justification for all Text Elements in window\n :type text_justification: 'left' | 'right' | 'center'\n :param background_color: color of background\n :type background_color: (str)\n :param element_background_color: element background color\n :type element_background_color: (str)\n :param text_element_background_color: text element background color\n :type text_element_background_color: (str)\n :param input_elements_background_color: Default color to use for the background of input elements\n :type input_elements_background_color: (str)\n :param input_text_color: Default color to use for the text for Input elements\n :type input_text_color: (str)\n :param scrollbar_color: Default color to use for the slider trough\n :type scrollbar_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param element_text_color: Default color to use for Text elements\n :type element_text_color: (str)\n :param debug_win_size: 
window size\n :type debug_win_size: (int, int)\n :param window_location: Default location to place windows. Not setting will center windows on the display\n :type window_location: (int, int) | None\n :param error_button_color: (Default = (None))\n :type error_button_color: ???\n :param tooltip_time: time in milliseconds to wait before showing a tooltip. Default is 400ms\n :type tooltip_time: (int)\n :param tooltip_font: font to use for all tooltips\n :type tooltip_font: str or Tuple[str, int] or Tuple[str, int, str]\n :param use_ttk_buttons: if True will cause all buttons to be ttk buttons\n :type use_ttk_buttons: (bool)\n :param ttk_theme: Theme to use with ttk widgets. Choices (on Windows) include - 'default', 'winnative', 'clam', 'alt', 'classic', 'vista', 'xpnative'\n :type ttk_theme: (str)\n :param suppress_error_popups: If True then error popups will not be shown if generated internally to PySimpleGUI\n :type suppress_error_popups: (bool)\n :param suppress_raise_key_errors: If True then key errors won't be raised (you'll still get popup error)\n :type suppress_raise_key_errors: (bool)\n :param suppress_key_guessing: If True then key errors won't try and find closest matches for you\n :type suppress_key_guessing: (bool)\n :param warn_button_key_duplicates: If True then duplicate Button Keys generate warnings (not recommended as they're expected)\n :type warn_button_key_duplicates: (bool) \n :param enable_treeview_869_patch: If True, then will use the treeview color patch for tk 8.6.9\n :type enable_treeview_869_patch: (bool)\n :param enable_mac_notitlebar_patch: If True then Windows with no titlebar use an alternative technique when tkinter version < 8.6.10\n :type enable_mac_notitlebar_patch: (bool)\n :param use_custom_titlebar: If True then a custom titlebar is used instead of the normal system titlebar\n :type use_custom_titlebar: (bool)\n :param titlebar_background_color: If custom titlebar indicated by use_custom_titlebar, then use this as background 
color\n :type titlebar_background_color: str | None\n :param titlebar_text_color: If custom titlebar indicated by use_custom_titlebar, then use this as text color\n :type titlebar_text_color: str | None\n :param titlebar_font: If custom titlebar indicated by use_custom_titlebar, then use this as title font\n :type titlebar_font: (str or (str, int[, str]) or None) | None\n :param titlebar_icon: If custom titlebar indicated by use_custom_titlebar, then use this as the icon (file or base64 bytes)\n :type titlebar_icon: bytes | str\n :param user_settings_path: default path for user_settings API calls. Expanded with os.path.expanduser so can contain ~ to represent user\n :type user_settings_path: (str)\n :param pysimplegui_settings_path: default path for the global PySimpleGUI user_settings\n :type pysimplegui_settings_path: (str)\n :param pysimplegui_settings_filename: default filename for the global PySimpleGUI user_settings\n :type pysimplegui_settings_filename: (str)\n :param keep_on_top: If True then all windows will automatically be set to keep_on_top=True\n :type keep_on_top: (bool)\n :param dpi_awareness: If True then will turn on DPI awareness (Windows only at the moment)\n :type dpi_awareness: (bool)\n :param scaling: Sets the default scaling for all windows including popups, etc.\n :type scaling: (float)\n :param disable_modal_windows: If True then all windows, including popups, will not be modal windows\n :type disable_modal_windows: (bool)\n :param tooltip_offset: Offset to use for tooltips as a tuple. These values will be added to the mouse location when the widget was entered.\n :type tooltip_offset: ((None, None) | (int, int))\n :return: None\n :rtype: None\n ",
"language": "en",
"n_whitespaces": 2847,
"n_words": 889,
"vocab_size": 356
} | def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),
margins=(None, None),
element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,
slider_border_width=None, slider_relief=None, slider_orientation=None,
autoclose_time=None, message_box_line_width=None,
progress_meter_border_depth=None, progress_meter_style=None,
progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None,
text_justification=None, background_color=None, element_background_color=None,
text_element_background_color=None, input_elements_background_color=None, input_text_color=None,
scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None),
window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None,
suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None,
enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None,
titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)):
global DEFAULT_ELEMENT_SIZE
global DEFAULT_BUTTON_ELEMENT_SIZE
global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term
global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels
global DEFAULT_AUTOSIZE_TEXT
global DEFAULT_AUTOSIZE_BUTTONS
global DEFAULT_FONT
global DEFAULT_BORDER_WIDTH
global DEFAULT_AUTOCLOSE_TIME
global DEFAULT_BUTTON_COLOR
global MESSAGE_BOX_LINE_WIDTH
global DEFAULT_PROGRESS_BAR_BORDER_WIDTH
global DEFAULT_PROGRESS_BAR_STYLE
global DEFAULT_PROGRESS_BAR_RELIEF
global DEFAULT_PROGRESS_BAR_COLOR
global DEFAULT_PROGRESS_BAR_SIZE
global DEFAULT_TEXT_JUSTIFICATION
global DEFAULT_DEBUG_WINDOW_SIZE
global DEFAULT_SLIDER_BORDER_WIDTH
global DEFAULT_SLIDER_RELIEF
global DEFAULT_SLIDER_ORIENTATION
global DEFAULT_BACKGROUND_COLOR
global DEFAULT_INPUT_ELEMENTS_COLOR
global DEFAULT_ELEMENT_BACKGROUND_COLOR
global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR
global DEFAULT_SCROLLBAR_COLOR
global DEFAULT_TEXT_COLOR
global DEFAULT_WINDOW_LOCATION
global DEFAULT_ELEMENT_TEXT_COLOR
global DEFAULT_INPUT_TEXT_COLOR
global DEFAULT_TOOLTIP_TIME
global DEFAULT_ERROR_BUTTON_COLOR
global DEFAULT_TTK_THEME
global USE_TTK_BUTTONS
global TOOLTIP_FONT
global SUPPRESS_ERROR_POPUPS
global SUPPRESS_RAISE_KEY_ERRORS
global SUPPRESS_KEY_GUESSING
global WARN_DUPLICATE_BUTTON_KEY_ERRORS
global ENABLE_TREEVIEW_869_PATCH
global ENABLE_MAC_NOTITLEBAR_PATCH
global USE_CUSTOM_TITLEBAR
global CUSTOM_TITLEBAR_BACKGROUND_COLOR
global CUSTOM_TITLEBAR_TEXT_COLOR
global CUSTOM_TITLEBAR_ICON
global CUSTOM_TITLEBAR_FONT
global DEFAULT_USER_SETTINGS_PATH
global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH
global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME
global DEFAULT_KEEP_ON_TOP
global DEFAULT_SCALING
global DEFAULT_MODAL_WINDOWS_ENABLED
global DEFAULT_TOOLTIP_OFFSET
global _pysimplegui_user_settings
# global _my_windows
if icon:
Window._user_defined_icon = icon
# _my_windows._user_defined_icon = icon
if button_color != None:
if button_color == COLOR_SYSTEM_DEFAULT:
DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT)
else:
DEFAULT_BUTTON_COLOR = button_color
if element_size != (None, None):
DEFAULT_ELEMENT_SIZE = element_size
if button_element_size != (None, None):
DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size
if margins != (None, None):
DEFAULT_MARGINS = margins
if element_padding != (None, None):
DEFAULT_ELEMENT_PADDING = element_padding
if auto_size_text != None:
DEFAULT_AUTOSIZE_TEXT = auto_size_text
if auto_size_buttons != None:
DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons
if font != None:
DEFAULT_FONT = font
if border_width != None:
DEFAULT_BORDER_WIDTH = border_width
if autoclose_time != None:
DEFAULT_AUTOCLOSE_TIME = autoclose_time
if message_box_line_width != None:
MESSAGE_BOX_LINE_WIDTH = message_box_line_width
if progress_meter_border_depth != None:
DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth
if progress_meter_style != None:
warnings.warn('You can no longer set a progress bar style. All ttk styles must be the same for the window', UserWarning)
# DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style
if progress_meter_relief != None:
DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief
if progress_meter_color != None:
DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color
if progress_meter_size != None:
DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size
if slider_border_width != None:
DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width
if slider_orientation != None:
DEFAULT_SLIDER_ORIENTATION = slider_orientation
if slider_relief != None:
DEFAULT_SLIDER_RELIEF = slider_relief
if text_justification != None:
DEFAULT_TEXT_JUSTIFICATION = text_justification
if background_color != None:
DEFAULT_BACKGROUND_COLOR = background_color
if text_element_background_color != None:
DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color
if input_elements_background_color != None:
DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color
if element_background_color != None:
DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color
if window_location != (None, None):
DEFAULT_WINDOW_LOCATION = window_location
if debug_win_size != (None, None):
DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size
if text_color != None:
DEFAULT_TEXT_COLOR = text_color
if scrollbar_color != None:
DEFAULT_SCROLLBAR_COLOR = scrollbar_color
if element_text_color != None:
DEFAULT_ELEMENT_TEXT_COLOR = element_text_color
if input_text_color is not None:
DEFAULT_INPUT_TEXT_COLOR = input_text_color
if tooltip_time is not None:
DEFAULT_TOOLTIP_TIME = tooltip_time
if error_button_color != (None, None):
DEFAULT_ERROR_BUTTON_COLOR = error_button_color
if ttk_theme is not None:
DEFAULT_TTK_THEME = ttk_theme
if use_ttk_buttons is not None:
USE_TTK_BUTTONS = use_ttk_buttons
if tooltip_font is not None:
TOOLTIP_FONT = tooltip_font
if suppress_error_popups is not None:
SUPPRESS_ERROR_POPUPS = suppress_error_popups
if suppress_raise_key_errors is not None:
SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors
if suppress_key_guessing is not None:
SUPPRESS_KEY_GUESSING = suppress_key_guessing
if warn_button_key_duplicates is not None:
WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates
if enable_treeview_869_patch is not None:
ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch
if enable_mac_notitlebar_patch is not None:
ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch
if use_custom_titlebar is not None:
USE_CUSTOM_TITLEBAR = use_custom_titlebar
if titlebar_background_color is not None:
CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color
if titlebar_text_color is not None:
CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color
if titlebar_font is not None:
CUSTOM_TITLEBAR_FONT = titlebar_font
if titlebar_icon is not None:
CUSTOM_TITLEBAR_ICON = titlebar_icon
if user_settings_path is not None:
DEFAULT_USER_SETTINGS_PATH = user_settings_path
if pysimplegui_settings_path is not None:
DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path
if pysimplegui_settings_filename is not None:
DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename
if pysimplegui_settings_filename is not None or pysimplegui_settings_filename is not None:
_pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME,
path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH)
if keep_on_top is not None:
DEFAULT_KEEP_ON_TOP = keep_on_top
if dpi_awareness is True:
if running_windows():
if platform.release() == "7":
ctypes.windll.user32.SetProcessDPIAware()
elif platform.release() == "8" or platform.release() == "10":
ctypes.windll.shcore.SetProcessDpiAwareness(1)
if scaling is not None:
DEFAULT_SCALING = scaling
if disable_modal_windows is not None:
DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows
if tooltip_offset != (None, None):
DEFAULT_TOOLTIP_OFFSET = tooltip_offset
return True
# ----------------------------------------------------------------- #
# .########.##.....##.########.##.....##.########..######.
# ....##....##.....##.##.......###...###.##.......##....##
# ....##....##.....##.##.......####.####.##.......##......
# ....##....#########.######...##.###.##.######....######.
# ....##....##.....##.##.......##.....##.##.............##
# ....##....##.....##.##.......##.....##.##.......##....##
# ....##....##.....##.########.##.....##.########..######.
# ----------------------------------------------------------------- #
# The official Theme code
#################### ChangeLookAndFeel #######################
# Predefined color and style settings ("themes") that are    #
# applied to all elements when a look-and-feel is selected.  #
##############################################################
LOOK_AND_FEEL_TABLE = {
"SystemDefault": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1,
"SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
"SystemDefaultForReal": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT,
"TEXT_INPUT": COLOR_SYSTEM_DEFAULT, "SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": COLOR_SYSTEM_DEFAULT,
"PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
"SystemDefault1": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": COLOR_SYSTEM_DEFAULT, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1,
"PROGRESS_DEPTH": 0, },
"Material1": {"BACKGROUND": "#E3F2FD", "TEXT": "#000000", "INPUT": "#86A8FF", "TEXT_INPUT": "#000000", "SCROLL": "#86A8FF",
"BUTTON": ("#FFFFFF", "#5079D3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 0, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#FF0266", "ACCENT2": "#FF5C93", "ACCENT3": "#C5003C", },
"Material2": {"BACKGROUND": "#FAFAFA", "TEXT": "#000000", "INPUT": "#004EA1", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#5EA7FF",
"BUTTON": ("#FFFFFF", "#0079D3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 0, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#FF0266", "ACCENT2": "#FF5C93", "ACCENT3": "#C5003C", },
"Reddit": {"BACKGROUND": "#ffffff", "TEXT": "#1a1a1b", "INPUT": "#dae0e6", "TEXT_INPUT": "#222222", "SCROLL": "#a5a4a4", "BUTTON": ("#FFFFFF", "#0079d3"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, "ACCENT1": "#ff5414", "ACCENT2": "#33a8ff",
"ACCENT3": "#dbf0ff", },
"Topanga": {"BACKGROUND": "#282923", "TEXT": "#E7DB74", "INPUT": "#393a32", "TEXT_INPUT": "#E7C855", "SCROLL": "#E7C855", "BUTTON": ("#E7C855", "#284B5A"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, "ACCENT1": "#c15226", "ACCENT2": "#7a4d5f",
"ACCENT3": "#889743", },
"GreenTan": {"BACKGROUND": "#9FB8AD", "TEXT": '#000000', "INPUT": "#F7F3EC", "TEXT_INPUT": "#000000", "SCROLL": "#F7F3EC", "BUTTON": ("#FFFFFF", "#475841"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Dark": {"BACKGROUND": "#404040", "TEXT": "#FFFFFF", "INPUT": "#4D4D4D", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#707070", "BUTTON": ("#FFFFFF", "#004F00"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightGreen": {"BACKGROUND": "#B7CECE", "TEXT": "#000000", "INPUT": "#FDFFF7", "TEXT_INPUT": "#000000", "SCROLL": "#FDFFF7",
"BUTTON": ("#FFFFFF", "#658268"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "ACCENT1": "#76506d",
"ACCENT2": "#5148f1", "ACCENT3": "#0a1c84", "PROGRESS_DEPTH": 0, },
"Dark2": {"BACKGROUND": "#404040", "TEXT": "#FFFFFF", "INPUT": "#FFFFFF", "TEXT_INPUT": "#000000", "SCROLL": "#707070", "BUTTON": ("#FFFFFF", "#004F00"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Black": {"BACKGROUND": "#000000", "TEXT": "#FFFFFF", "INPUT": "#4D4D4D", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#707070", "BUTTON": ("#000000", "#FFFFFF"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Tan": {"BACKGROUND": "#fdf6e3", "TEXT": "#268bd1", "INPUT": "#eee8d5", "TEXT_INPUT": "#6c71c3", "SCROLL": "#eee8d5", "BUTTON": ("#FFFFFF", "#063542"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"TanBlue": {"BACKGROUND": "#e5dece", "TEXT": "#063289", "INPUT": "#f9f8f4", "TEXT_INPUT": "#242834", "SCROLL": "#eee8d5", "BUTTON": ("#FFFFFF", "#063289"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkTanBlue": {"BACKGROUND": "#242834", "TEXT": "#dfe6f8", "INPUT": "#97755c", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#a9afbb",
"BUTTON": ("#FFFFFF", "#063289"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkAmber": {"BACKGROUND": "#2c2825", "TEXT": "#fdcb52", "INPUT": "#705e52", "TEXT_INPUT": "#fdcb52", "SCROLL": "#705e52",
"BUTTON": ("#000000", "#fdcb52"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlue": {"BACKGROUND": "#1a2835", "TEXT": "#d1ecff", "INPUT": "#335267", "TEXT_INPUT": "#acc2d0", "SCROLL": "#1b6497", "BUTTON": ("#000000", "#fafaf8"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Reds": {"BACKGROUND": "#280001", "TEXT": "#FFFFFF", "INPUT": "#d8d584", "TEXT_INPUT": "#000000", "SCROLL": "#763e00", "BUTTON": ("#000000", "#daad28"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Green": {"BACKGROUND": "#82a459", "TEXT": "#000000", "INPUT": "#d8d584", "TEXT_INPUT": "#000000", "SCROLL": "#e3ecf3", "BUTTON": ("#FFFFFF", "#517239"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"BluePurple": {"BACKGROUND": "#A5CADD", "TEXT": "#6E266E", "INPUT": "#E0F5FF", "TEXT_INPUT": "#000000", "SCROLL": "#E0F5FF",
"BUTTON": ("#FFFFFF", "#303952"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Purple": {"BACKGROUND": "#B0AAC2", "TEXT": "#000000", "INPUT": "#F2EFE8", "SCROLL": "#F2EFE8", "TEXT_INPUT": "#000000", "BUTTON": ("#000000", "#C2D4D8"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"BlueMono": {"BACKGROUND": "#AAB6D3", "TEXT": "#000000", "INPUT": "#F1F4FC", "SCROLL": "#F1F4FC", "TEXT_INPUT": "#000000", "BUTTON": ("#FFFFFF", "#7186C7"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"GreenMono": {"BACKGROUND": "#A8C1B4", "TEXT": "#000000", "INPUT": "#DDE0DE", "SCROLL": "#E3E3E3", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#6D9F85"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"BrownBlue": {"BACKGROUND": "#64778d", "TEXT": "#FFFFFF", "INPUT": "#f0f3f7", "SCROLL": "#A6B2BE", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#283b5b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"BrightColors": {"BACKGROUND": "#b4ffb4", "TEXT": "#000000", "INPUT": "#ffff64", "SCROLL": "#ffb482", "TEXT_INPUT": "#000000",
"BUTTON": ("#000000", "#ffa0dc"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"NeutralBlue": {"BACKGROUND": "#92aa9d", "TEXT": "#000000", "INPUT": "#fcfff6", "SCROLL": "#fcfff6", "TEXT_INPUT": "#000000",
"BUTTON": ("#000000", "#d0dbbd"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Kayak": {"BACKGROUND": "#a7ad7f", "TEXT": "#000000", "INPUT": "#e6d3a8", "SCROLL": "#e6d3a8", "TEXT_INPUT": "#000000", "BUTTON": ("#FFFFFF", "#5d907d"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"SandyBeach": {"BACKGROUND": "#efeccb", "TEXT": "#012f2f", "INPUT": "#e6d3a8", "SCROLL": "#e6d3a8", "TEXT_INPUT": "#012f2f",
"BUTTON": ("#FFFFFF", "#046380"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"TealMono": {"BACKGROUND": "#a8cfdd", "TEXT": "#000000", "INPUT": "#dfedf2", "SCROLL": "#dfedf2", "TEXT_INPUT": "#000000", "BUTTON": ("#FFFFFF", "#183440"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"Default": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1,
"PROGRESS_DEPTH": 0, },
"Default1": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": COLOR_SYSTEM_DEFAULT, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1,
"PROGRESS_DEPTH": 0, },
"DefaultNoMoreNagging": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT,
"TEXT_INPUT": COLOR_SYSTEM_DEFAULT, "SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR,
"PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
"GrayGrayGray": {"BACKGROUND": COLOR_SYSTEM_DEFAULT, "TEXT": COLOR_SYSTEM_DEFAULT, "INPUT": COLOR_SYSTEM_DEFAULT, "TEXT_INPUT": COLOR_SYSTEM_DEFAULT,
"SCROLL": COLOR_SYSTEM_DEFAULT, "BUTTON": COLOR_SYSTEM_DEFAULT, "PROGRESS": COLOR_SYSTEM_DEFAULT, "BORDER": 1, "SLIDER_DEPTH": 1,
"PROGRESS_DEPTH": 0, },
"LightBlue": {"BACKGROUND": "#E3F2FD", "TEXT": "#000000", "INPUT": "#86A8FF", "TEXT_INPUT": "#000000", "SCROLL": "#86A8FF",
"BUTTON": ("#FFFFFF", "#5079D3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 0, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#FF0266", "ACCENT2": "#FF5C93", "ACCENT3": "#C5003C", },
"LightGrey": {"BACKGROUND": "#FAFAFA", "TEXT": "#000000", "INPUT": "#004EA1", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#5EA7FF",
"BUTTON": ("#FFFFFF", "#0079D3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 0, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#FF0266", "ACCENT2": "#FF5C93", "ACCENT3": "#C5003C", },
"LightGrey1": {"BACKGROUND": "#ffffff", "TEXT": "#1a1a1b", "INPUT": "#dae0e6", "TEXT_INPUT": "#222222", "SCROLL": "#a5a4a4",
"BUTTON": ("#FFFFFF", "#0079d3"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#ff5414", "ACCENT2": "#33a8ff", "ACCENT3": "#dbf0ff", },
"DarkBrown": {"BACKGROUND": "#282923", "TEXT": "#E7DB74", "INPUT": "#393a32", "TEXT_INPUT": "#E7C855", "SCROLL": "#E7C855",
"BUTTON": ("#E7C855", "#284B5A"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"ACCENT1": "#c15226", "ACCENT2": "#7a4d5f", "ACCENT3": "#889743", },
"LightGreen1": {"BACKGROUND": "#9FB8AD", "TEXT": "#000000", "INPUT": "#F7F3EC", "TEXT_INPUT": "#000000", "SCROLL": "#F7F3EC",
"BUTTON": ("#FFFFFF", "#475841"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey": {"BACKGROUND": "#404040", "TEXT": "#FFFFFF", "INPUT": "#4D4D4D", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#707070", "BUTTON": ("#FFFFFF", "#004F00"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightGreen2": {"BACKGROUND": "#B7CECE", "TEXT": "#000000", "INPUT": "#FDFFF7", "TEXT_INPUT": "#000000", "SCROLL": "#FDFFF7",
"BUTTON": ("#FFFFFF", "#658268"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "ACCENT1": "#76506d",
"ACCENT2": "#5148f1", "ACCENT3": "#0a1c84", "PROGRESS_DEPTH": 0, },
"DarkGrey1": {"BACKGROUND": "#404040", "TEXT": "#FFFFFF", "INPUT": "#FFFFFF", "TEXT_INPUT": "#000000", "SCROLL": "#707070",
"BUTTON": ("#FFFFFF", "#004F00"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlack": {"BACKGROUND": "#000000", "TEXT": "#FFFFFF", "INPUT": "#4D4D4D", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#707070",
"BUTTON": ("#000000", "#FFFFFF"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBrown": {"BACKGROUND": "#fdf6e3", "TEXT": "#268bd1", "INPUT": "#eee8d5", "TEXT_INPUT": "#6c71c3", "SCROLL": "#eee8d5",
"BUTTON": ("#FFFFFF", "#063542"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBrown1": {"BACKGROUND": "#e5dece", "TEXT": "#063289", "INPUT": "#f9f8f4", "TEXT_INPUT": "#242834", "SCROLL": "#eee8d5",
"BUTTON": ("#FFFFFF", "#063289"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlue1": {"BACKGROUND": "#242834", "TEXT": "#dfe6f8", "INPUT": "#97755c", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#a9afbb",
"BUTTON": ("#FFFFFF", "#063289"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBrown1": {"BACKGROUND": "#2c2825", "TEXT": "#fdcb52", "INPUT": "#705e52", "TEXT_INPUT": "#fdcb52", "SCROLL": "#705e52",
"BUTTON": ("#000000", "#fdcb52"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlue2": {"BACKGROUND": "#1a2835", "TEXT": "#d1ecff", "INPUT": "#335267", "TEXT_INPUT": "#acc2d0", "SCROLL": "#1b6497",
"BUTTON": ("#000000", "#fafaf8"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBrown2": {"BACKGROUND": "#280001", "TEXT": "#FFFFFF", "INPUT": "#d8d584", "TEXT_INPUT": "#000000", "SCROLL": "#763e00",
"BUTTON": ("#000000", "#daad28"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGreen": {"BACKGROUND": "#82a459", "TEXT": "#000000", "INPUT": "#d8d584", "TEXT_INPUT": "#000000", "SCROLL": "#e3ecf3",
"BUTTON": ("#FFFFFF", "#517239"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBlue1": {"BACKGROUND": "#A5CADD", "TEXT": "#6E266E", "INPUT": "#E0F5FF", "TEXT_INPUT": "#000000", "SCROLL": "#E0F5FF",
"BUTTON": ("#FFFFFF", "#303952"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightPurple": {"BACKGROUND": "#B0AAC2", "TEXT": "#000000", "INPUT": "#F2EFE8", "SCROLL": "#F2EFE8", "TEXT_INPUT": "#000000",
"BUTTON": ("#000000", "#C2D4D8"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBlue2": {"BACKGROUND": "#AAB6D3", "TEXT": "#000000", "INPUT": "#F1F4FC", "SCROLL": "#F1F4FC", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#7186C7"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightGreen3": {"BACKGROUND": "#A8C1B4", "TEXT": "#000000", "INPUT": "#DDE0DE", "SCROLL": "#E3E3E3", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#6D9F85"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlue3": {"BACKGROUND": "#64778d", "TEXT": "#FFFFFF", "INPUT": "#f0f3f7", "SCROLL": "#A6B2BE", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#283b5b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightGreen4": {"BACKGROUND": "#b4ffb4", "TEXT": "#000000", "INPUT": "#ffff64", "SCROLL": "#ffb482", "TEXT_INPUT": "#000000",
"BUTTON": ("#000000", "#ffa0dc"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightGreen5": {"BACKGROUND": "#92aa9d", "TEXT": "#000000", "INPUT": "#fcfff6", "SCROLL": "#fcfff6", "TEXT_INPUT": "#000000",
"BUTTON": ("#000000", "#d0dbbd"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBrown2": {"BACKGROUND": "#a7ad7f", "TEXT": "#000000", "INPUT": "#e6d3a8", "SCROLL": "#e6d3a8", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#5d907d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBrown3": {"BACKGROUND": "#efeccb", "TEXT": "#012f2f", "INPUT": "#e6d3a8", "SCROLL": "#e6d3a8", "TEXT_INPUT": "#012f2f",
"BUTTON": ("#FFFFFF", "#046380"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBlue3": {"BACKGROUND": "#a8cfdd", "TEXT": "#000000", "INPUT": "#dfedf2", "SCROLL": "#dfedf2", "TEXT_INPUT": "#000000",
"BUTTON": ("#FFFFFF", "#183440"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"LightBrown4": {"BACKGROUND": "#d7c79e", "TEXT": "#a35638", "INPUT": "#9dab86", "TEXT_INPUT": "#000000", "SCROLL": "#a35638",
"BUTTON": ("#FFFFFF", "#a35638"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#a35638", "#9dab86", "#e08f62", "#d7c79e"], },
"DarkTeal": {"BACKGROUND": "#003f5c", "TEXT": "#fb5b5a", "INPUT": "#bc4873", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#bc4873", "BUTTON": ("#FFFFFF", "#fb5b5a"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#003f5c", "#472b62", "#bc4873", "#fb5b5a"], },
"DarkPurple": {"BACKGROUND": "#472b62", "TEXT": "#fb5b5a", "INPUT": "#bc4873", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#bc4873",
"BUTTON": ("#FFFFFF", "#472b62"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#003f5c", "#472b62", "#bc4873", "#fb5b5a"], },
"LightGreen6": {"BACKGROUND": "#eafbea", "TEXT": "#1f6650", "INPUT": "#6f9a8d", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#1f6650",
"BUTTON": ("#FFFFFF", "#1f6650"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#1f6650", "#6f9a8d", "#ea5e5e", "#eafbea"], },
"DarkGrey2": {"BACKGROUND": "#2b2b28", "TEXT": "#f8f8f8", "INPUT": "#f1d6ab", "TEXT_INPUT": "#000000", "SCROLL": "#f1d6ab",
"BUTTON": ("#2b2b28", "#e3b04b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#2b2b28", "#e3b04b", "#f1d6ab", "#f8f8f8"], },
"LightBrown6": {"BACKGROUND": "#f9b282", "TEXT": "#8f4426", "INPUT": "#de6b35", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#8f4426",
"BUTTON": ("#FFFFFF", "#8f4426"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#8f4426", "#de6b35", "#64ccda", "#f9b282"], },
"DarkTeal1": {"BACKGROUND": "#396362", "TEXT": "#ffe7d1", "INPUT": "#f6c89f", "TEXT_INPUT": "#000000", "SCROLL": "#f6c89f",
"BUTTON": ("#ffe7d1", "#4b8e8d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#396362", "#4b8e8d", "#f6c89f", "#ffe7d1"], },
"LightBrown7": {"BACKGROUND": "#f6c89f", "TEXT": "#396362", "INPUT": "#4b8e8d", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#396362",
"BUTTON": ("#FFFFFF", "#396362"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#396362", "#4b8e8d", "#f6c89f", "#ffe7d1"], },
"DarkPurple1": {"BACKGROUND": "#0c093c", "TEXT": "#fad6d6", "INPUT": "#eea5f6", "TEXT_INPUT": "#000000", "SCROLL": "#eea5f6",
"BUTTON": ("#FFFFFF", "#df42d1"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#0c093c", "#df42d1", "#eea5f6", "#fad6d6"], },
"DarkGrey3": {"BACKGROUND": "#211717", "TEXT": "#dfddc7", "INPUT": "#f58b54", "TEXT_INPUT": "#000000", "SCROLL": "#f58b54",
"BUTTON": ("#dfddc7", "#a34a28"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#211717", "#a34a28", "#f58b54", "#dfddc7"], },
"LightBrown8": {"BACKGROUND": "#dfddc7", "TEXT": "#211717", "INPUT": "#a34a28", "TEXT_INPUT": "#dfddc7", "SCROLL": "#211717",
"BUTTON": ("#dfddc7", "#a34a28"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#211717", "#a34a28", "#f58b54", "#dfddc7"], },
"DarkBlue4": {"BACKGROUND": "#494ca2", "TEXT": "#e3e7f1", "INPUT": "#c6cbef", "TEXT_INPUT": "#000000", "SCROLL": "#c6cbef",
"BUTTON": ("#FFFFFF", "#8186d5"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#494ca2", "#8186d5", "#c6cbef", "#e3e7f1"], },
"LightBlue4": {"BACKGROUND": "#5c94bd", "TEXT": "#470938", "INPUT": "#1a3e59", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#470938",
"BUTTON": ("#FFFFFF", "#470938"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#470938", "#1a3e59", "#5c94bd", "#f2d6eb"], },
"DarkTeal2": {"BACKGROUND": "#394a6d", "TEXT": "#c0ffb3", "INPUT": "#52de97", "TEXT_INPUT": "#000000", "SCROLL": "#52de97",
"BUTTON": ("#c0ffb3", "#394a6d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#394a6d", "#3c9d9b", "#52de97", "#c0ffb3"], },
"DarkTeal3": {"BACKGROUND": "#3c9d9b", "TEXT": "#c0ffb3", "INPUT": "#52de97", "TEXT_INPUT": "#000000", "SCROLL": "#52de97",
"BUTTON": ("#c0ffb3", "#394a6d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#394a6d", "#3c9d9b", "#52de97", "#c0ffb3"], },
"DarkPurple5": {"BACKGROUND": "#730068", "TEXT": "#f6f078", "INPUT": "#01d28e", "TEXT_INPUT": "#000000", "SCROLL": "#01d28e",
"BUTTON": ("#f6f078", "#730068"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#730068", "#434982", "#01d28e", "#f6f078"], },
"DarkPurple2": {"BACKGROUND": "#202060", "TEXT": "#b030b0", "INPUT": "#602080", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#602080",
"BUTTON": ("#FFFFFF", "#202040"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#202040", "#202060", "#602080", "#b030b0"], },
"DarkBlue5": {"BACKGROUND": "#000272", "TEXT": "#ff6363", "INPUT": "#a32f80", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#a32f80",
"BUTTON": ("#FFFFFF", "#341677"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#000272", "#341677", "#a32f80", "#ff6363"], },
"LightGrey2": {"BACKGROUND": "#f6f6f6", "TEXT": "#420000", "INPUT": "#d4d7dd", "TEXT_INPUT": "#420000", "SCROLL": "#420000",
"BUTTON": ("#420000", "#d4d7dd"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#420000", "#d4d7dd", "#eae9e9", "#f6f6f6"], },
"LightGrey3": {"BACKGROUND": "#eae9e9", "TEXT": "#420000", "INPUT": "#d4d7dd", "TEXT_INPUT": "#420000", "SCROLL": "#420000",
"BUTTON": ("#420000", "#d4d7dd"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#420000", "#d4d7dd", "#eae9e9", "#f6f6f6"], },
"DarkBlue6": {"BACKGROUND": "#01024e", "TEXT": "#ff6464", "INPUT": "#8b4367", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#8b4367",
"BUTTON": ("#FFFFFF", "#543864"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#01024e", "#543864", "#8b4367", "#ff6464"], },
"DarkBlue7": {"BACKGROUND": "#241663", "TEXT": "#eae7af", "INPUT": "#a72693", "TEXT_INPUT": "#eae7af", "SCROLL": "#a72693",
"BUTTON": ("#eae7af", "#160f30"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#160f30", "#241663", "#a72693", "#eae7af"], },
"LightBrown9": {"BACKGROUND": "#f6d365", "TEXT": "#3a1f5d", "INPUT": "#c83660", "TEXT_INPUT": "#f6d365", "SCROLL": "#3a1f5d",
"BUTTON": ("#f6d365", "#c83660"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3a1f5d", "#c83660", "#e15249", "#f6d365"], },
"DarkPurple3": {"BACKGROUND": "#6e2142", "TEXT": "#ffd692", "INPUT": "#e16363", "TEXT_INPUT": "#ffd692", "SCROLL": "#e16363",
"BUTTON": ("#ffd692", "#943855"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#6e2142", "#943855", "#e16363", "#ffd692"], },
"LightBrown10": {"BACKGROUND": "#ffd692", "TEXT": "#6e2142", "INPUT": "#943855", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#6e2142",
"BUTTON": ("#FFFFFF", "#6e2142"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#6e2142", "#943855", "#e16363", "#ffd692"], },
"DarkPurple4": {"BACKGROUND": "#200f21", "TEXT": "#f638dc", "INPUT": "#5a3d5c", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#5a3d5c",
"BUTTON": ("#FFFFFF", "#382039"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#200f21", "#382039", "#5a3d5c", "#f638dc"], },
"LightBlue5": {"BACKGROUND": "#b2fcff", "TEXT": "#3e64ff", "INPUT": "#5edfff", "TEXT_INPUT": "#000000", "SCROLL": "#3e64ff",
"BUTTON": ("#FFFFFF", "#3e64ff"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3e64ff", "#5edfff", "#b2fcff", "#ecfcff"], },
"DarkTeal4": {"BACKGROUND": "#464159", "TEXT": "#c7f0db", "INPUT": "#8bbabb", "TEXT_INPUT": "#000000", "SCROLL": "#8bbabb",
"BUTTON": ("#FFFFFF", "#6c7b95"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#464159", "#6c7b95", "#8bbabb", "#c7f0db"], },
"LightTeal": {"BACKGROUND": "#c7f0db", "TEXT": "#464159", "INPUT": "#6c7b95", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#464159",
"BUTTON": ("#FFFFFF", "#464159"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#464159", "#6c7b95", "#8bbabb", "#c7f0db"], },
"DarkTeal5": {"BACKGROUND": "#8bbabb", "TEXT": "#464159", "INPUT": "#6c7b95", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#464159",
"BUTTON": ("#c7f0db", "#6c7b95"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#464159", "#6c7b95", "#8bbabb", "#c7f0db"], },
"LightGrey4": {"BACKGROUND": "#faf5ef", "TEXT": "#672f2f", "INPUT": "#99b19c", "TEXT_INPUT": "#672f2f", "SCROLL": "#672f2f",
"BUTTON": ("#672f2f", "#99b19c"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#672f2f", "#99b19c", "#d7d1c9", "#faf5ef"], },
"LightGreen7": {"BACKGROUND": "#99b19c", "TEXT": "#faf5ef", "INPUT": "#d7d1c9", "TEXT_INPUT": "#000000", "SCROLL": "#d7d1c9",
"BUTTON": ("#FFFFFF", "#99b19c"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#672f2f", "#99b19c", "#d7d1c9", "#faf5ef"], },
"LightGrey5": {"BACKGROUND": "#d7d1c9", "TEXT": "#672f2f", "INPUT": "#99b19c", "TEXT_INPUT": "#672f2f", "SCROLL": "#672f2f",
"BUTTON": ("#FFFFFF", "#672f2f"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#672f2f", "#99b19c", "#d7d1c9", "#faf5ef"], },
"DarkBrown3": {"BACKGROUND": "#a0855b", "TEXT": "#f9f6f2", "INPUT": "#f1d6ab", "TEXT_INPUT": "#000000", "SCROLL": "#f1d6ab",
"BUTTON": ("#FFFFFF", "#38470b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#38470b", "#a0855b", "#f1d6ab", "#f9f6f2"], },
"LightBrown11": {"BACKGROUND": "#f1d6ab", "TEXT": "#38470b", "INPUT": "#a0855b", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#38470b",
"BUTTON": ("#f9f6f2", "#a0855b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#38470b", "#a0855b", "#f1d6ab", "#f9f6f2"], },
"DarkRed": {"BACKGROUND": "#83142c", "TEXT": "#f9d276", "INPUT": "#ad1d45", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#ad1d45", "BUTTON": ("#f9d276", "#ad1d45"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#44000d", "#83142c", "#ad1d45", "#f9d276"], },
"DarkTeal6": {"BACKGROUND": "#204969", "TEXT": "#fff7f7", "INPUT": "#dadada", "TEXT_INPUT": "#000000", "SCROLL": "#dadada",
"BUTTON": ("#000000", "#fff7f7"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#204969", "#08ffc8", "#dadada", "#fff7f7"], },
"DarkBrown4": {"BACKGROUND": "#252525", "TEXT": "#ff0000", "INPUT": "#af0404", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#af0404",
"BUTTON": ("#FFFFFF", "#252525"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#252525", "#414141", "#af0404", "#ff0000"], },
"LightYellow": {"BACKGROUND": "#f4ff61", "TEXT": "#27aa80", "INPUT": "#32ff6a", "TEXT_INPUT": "#000000", "SCROLL": "#27aa80",
"BUTTON": ("#f4ff61", "#27aa80"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#27aa80", "#32ff6a", "#a8ff3e", "#f4ff61"], },
"DarkGreen1": {"BACKGROUND": "#2b580c", "TEXT": "#fdef96", "INPUT": "#f7b71d", "TEXT_INPUT": "#000000", "SCROLL": "#f7b71d",
"BUTTON": ("#fdef96", "#2b580c"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#2b580c", "#afa939", "#f7b71d", "#fdef96"], },
"LightGreen8": {"BACKGROUND": "#c8dad3", "TEXT": "#63707e", "INPUT": "#93b5b3", "TEXT_INPUT": "#000000", "SCROLL": "#63707e",
"BUTTON": ("#FFFFFF", "#63707e"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#63707e", "#93b5b3", "#c8dad3", "#f2f6f5"], },
"DarkTeal7": {"BACKGROUND": "#248ea9", "TEXT": "#fafdcb", "INPUT": "#aee7e8", "TEXT_INPUT": "#000000", "SCROLL": "#aee7e8",
"BUTTON": ("#000000", "#fafdcb"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#248ea9", "#28c3d4", "#aee7e8", "#fafdcb"], },
"DarkBlue8": {"BACKGROUND": "#454d66", "TEXT": "#d9d872", "INPUT": "#58b368", "TEXT_INPUT": "#000000", "SCROLL": "#58b368",
"BUTTON": ("#000000", "#009975"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#009975", "#454d66", "#58b368", "#d9d872"], },
"DarkBlue9": {"BACKGROUND": "#263859", "TEXT": "#ff6768", "INPUT": "#6b778d", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#6b778d",
"BUTTON": ("#ff6768", "#263859"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#17223b", "#263859", "#6b778d", "#ff6768"], },
"DarkBlue10": {"BACKGROUND": "#0028ff", "TEXT": "#f1f4df", "INPUT": "#10eaf0", "TEXT_INPUT": "#000000", "SCROLL": "#10eaf0",
"BUTTON": ("#f1f4df", "#24009c"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#24009c", "#0028ff", "#10eaf0", "#f1f4df"], },
"DarkBlue11": {"BACKGROUND": "#6384b3", "TEXT": "#e6f0b6", "INPUT": "#b8e9c0", "TEXT_INPUT": "#000000", "SCROLL": "#b8e9c0",
"BUTTON": ("#e6f0b6", "#684949"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#684949", "#6384b3", "#b8e9c0", "#e6f0b6"], },
"DarkTeal8": {"BACKGROUND": "#71a0a5", "TEXT": "#212121", "INPUT": "#665c84", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#212121",
"BUTTON": ("#fab95b", "#665c84"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#212121", "#665c84", "#71a0a5", "#fab95b"], },
"DarkRed1": {"BACKGROUND": "#c10000", "TEXT": "#eeeeee", "INPUT": "#dedede", "TEXT_INPUT": "#000000", "SCROLL": "#dedede", "BUTTON": ("#c10000", "#eeeeee"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#c10000", "#ff4949", "#dedede", "#eeeeee"], },
"LightBrown5": {"BACKGROUND": "#fff591", "TEXT": "#e41749", "INPUT": "#f5587b", "TEXT_INPUT": "#000000", "SCROLL": "#e41749",
"BUTTON": ("#fff591", "#e41749"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#e41749", "#f5587b", "#ff8a5c", "#fff591"], },
"LightGreen9": {"BACKGROUND": "#f1edb3", "TEXT": "#3b503d", "INPUT": "#4a746e", "TEXT_INPUT": "#f1edb3", "SCROLL": "#3b503d",
"BUTTON": ("#f1edb3", "#3b503d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3b503d", "#4a746e", "#c8cf94", "#f1edb3"], "DESCRIPTION": ["Green", "Turquoise", "Yellow"], },
"DarkGreen2": {"BACKGROUND": "#3b503d", "TEXT": "#f1edb3", "INPUT": "#c8cf94", "TEXT_INPUT": "#000000", "SCROLL": "#c8cf94",
"BUTTON": ("#f1edb3", "#3b503d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3b503d", "#4a746e", "#c8cf94", "#f1edb3"], "DESCRIPTION": ["Green", "Turquoise", "Yellow"], },
"LightGray1": {"BACKGROUND": "#f2f2f2", "TEXT": "#222831", "INPUT": "#393e46", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#222831",
"BUTTON": ("#f2f2f2", "#222831"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#222831", "#393e46", "#f96d00", "#f2f2f2"], "DESCRIPTION": ["#000000", "Grey", "Orange", "Grey", "Autumn"], },
"DarkGrey4": {"BACKGROUND": "#52524e", "TEXT": "#e9e9e5", "INPUT": "#d4d6c8", "TEXT_INPUT": "#000000", "SCROLL": "#d4d6c8",
"BUTTON": ("#FFFFFF", "#9a9b94"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#52524e", "#9a9b94", "#d4d6c8", "#e9e9e5"], "DESCRIPTION": ["Grey", "Pastel", "Winter"], },
"DarkBlue12": {"BACKGROUND": "#324e7b", "TEXT": "#f8f8f8", "INPUT": "#86a6df", "TEXT_INPUT": "#000000", "SCROLL": "#86a6df",
"BUTTON": ("#FFFFFF", "#5068a9"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#324e7b", "#5068a9", "#86a6df", "#f8f8f8"], "DESCRIPTION": ["Blue", "Grey", "Cold", "Winter"], },
"DarkPurple6": {"BACKGROUND": "#070739", "TEXT": "#e1e099", "INPUT": "#c327ab", "TEXT_INPUT": "#e1e099", "SCROLL": "#c327ab",
"BUTTON": ("#e1e099", "#521477"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#070739", "#521477", "#c327ab", "#e1e099"], "DESCRIPTION": ["#000000", "Purple", "Yellow", "Dark"], },
"DarkPurple7": {"BACKGROUND": "#191930", "TEXT": "#B1B7C5", "INPUT": "#232B5C", "TEXT_INPUT": "#D0E3E7", "SCROLL": "#B1B7C5",
"BUTTON": ("#272D38", "#B1B7C5"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBlue13": {"BACKGROUND": "#203562", "TEXT": "#e3e8f8", "INPUT": "#c0c5cd", "TEXT_INPUT": "#000000", "SCROLL": "#c0c5cd",
"BUTTON": ("#FFFFFF", "#3e588f"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#203562", "#3e588f", "#c0c5cd", "#e3e8f8"], "DESCRIPTION": ["Blue", "Grey", "Wedding", "Cold"], },
"DarkBrown5": {"BACKGROUND": "#3c1b1f", "TEXT": "#f6e1b5", "INPUT": "#e2bf81", "TEXT_INPUT": "#000000", "SCROLL": "#e2bf81",
"BUTTON": ("#3c1b1f", "#f6e1b5"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3c1b1f", "#b21e4b", "#e2bf81", "#f6e1b5"], "DESCRIPTION": ["Brown", "Red", "Yellow", "Warm"], },
"DarkGreen3": {"BACKGROUND": "#062121", "TEXT": "#eeeeee", "INPUT": "#e4dcad", "TEXT_INPUT": "#000000", "SCROLL": "#e4dcad",
"BUTTON": ("#eeeeee", "#181810"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#062121", "#181810", "#e4dcad", "#eeeeee"], "DESCRIPTION": ["#000000", "#000000", "Brown", "Grey"], },
"DarkBlack1": {"BACKGROUND": "#181810", "TEXT": "#eeeeee", "INPUT": "#e4dcad", "TEXT_INPUT": "#000000", "SCROLL": "#e4dcad",
"BUTTON": ("#FFFFFF", "#062121"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#062121", "#181810", "#e4dcad", "#eeeeee"], "DESCRIPTION": ["#000000", "#000000", "Brown", "Grey"], },
"DarkGrey5": {"BACKGROUND": "#343434", "TEXT": "#f3f3f3", "INPUT": "#e9dcbe", "TEXT_INPUT": "#000000", "SCROLL": "#e9dcbe",
"BUTTON": ("#FFFFFF", "#8e8b82"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#343434", "#8e8b82", "#e9dcbe", "#f3f3f3"], "DESCRIPTION": ["Grey", "Brown"], },
"LightBrown12": {"BACKGROUND": "#8e8b82", "TEXT": "#f3f3f3", "INPUT": "#e9dcbe", "TEXT_INPUT": "#000000", "SCROLL": "#e9dcbe",
"BUTTON": ("#f3f3f3", "#8e8b82"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#343434", "#8e8b82", "#e9dcbe", "#f3f3f3"], "DESCRIPTION": ["Grey", "Brown"], },
"DarkTeal9": {"BACKGROUND": "#13445a", "TEXT": "#fef4e8", "INPUT": "#446878", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#446878",
"BUTTON": ("#fef4e8", "#446878"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#13445a", "#970747", "#446878", "#fef4e8"], "DESCRIPTION": ["Red", "Grey", "Blue", "Wedding", "Retro"], },
"DarkBlue14": {"BACKGROUND": "#21273d", "TEXT": "#f1f6f8", "INPUT": "#b9d4f1", "TEXT_INPUT": "#000000", "SCROLL": "#b9d4f1",
"BUTTON": ("#FFFFFF", "#6a759b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#21273d", "#6a759b", "#b9d4f1", "#f1f6f8"], "DESCRIPTION": ["Blue", "#000000", "Grey", "Cold", "Winter"], },
"LightBlue6": {"BACKGROUND": "#f1f6f8", "TEXT": "#21273d", "INPUT": "#6a759b", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#21273d",
"BUTTON": ("#f1f6f8", "#6a759b"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#21273d", "#6a759b", "#b9d4f1", "#f1f6f8"], "DESCRIPTION": ["Blue", "#000000", "Grey", "Cold", "Winter"], },
"DarkGreen4": {"BACKGROUND": "#044343", "TEXT": "#e4e4e4", "INPUT": "#045757", "TEXT_INPUT": "#e4e4e4", "SCROLL": "#045757",
"BUTTON": ("#e4e4e4", "#045757"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#222222", "#044343", "#045757", "#e4e4e4"], "DESCRIPTION": ["#000000", "Turquoise", "Grey", "Dark"], },
"DarkGreen5": {"BACKGROUND": "#1b4b36", "TEXT": "#e0e7f1", "INPUT": "#aebd77", "TEXT_INPUT": "#000000", "SCROLL": "#aebd77",
"BUTTON": ("#FFFFFF", "#538f6a"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#1b4b36", "#538f6a", "#aebd77", "#e0e7f1"], "DESCRIPTION": ["Green", "Grey"], },
"DarkTeal10": {"BACKGROUND": "#0d3446", "TEXT": "#d8dfe2", "INPUT": "#71adb5", "TEXT_INPUT": "#000000", "SCROLL": "#71adb5",
"BUTTON": ("#FFFFFF", "#176d81"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#0d3446", "#176d81", "#71adb5", "#d8dfe2"], "DESCRIPTION": ["Grey", "Turquoise", "Winter", "Cold"], },
"DarkGrey6": {"BACKGROUND": "#3e3e3e", "TEXT": "#ededed", "INPUT": "#68868c", "TEXT_INPUT": "#ededed", "SCROLL": "#68868c",
"BUTTON": ("#FFFFFF", "#405559"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3e3e3e", "#405559", "#68868c", "#ededed"], "DESCRIPTION": ["Grey", "Turquoise", "Winter"], },
"DarkTeal11": {"BACKGROUND": "#405559", "TEXT": "#ededed", "INPUT": "#68868c", "TEXT_INPUT": "#ededed", "SCROLL": "#68868c",
"BUTTON": ("#ededed", "#68868c"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#3e3e3e", "#405559", "#68868c", "#ededed"], "DESCRIPTION": ["Grey", "Turquoise", "Winter"], },
"LightBlue7": {"BACKGROUND": "#9ed0e0", "TEXT": "#19483f", "INPUT": "#5c868e", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#19483f",
"BUTTON": ("#FFFFFF", "#19483f"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#19483f", "#5c868e", "#ff6a38", "#9ed0e0"], "DESCRIPTION": ["Orange", "Blue", "Turquoise"], },
"LightGreen10": {"BACKGROUND": "#d8ebb5", "TEXT": "#205d67", "INPUT": "#639a67", "TEXT_INPUT": "#FFFFFF", "SCROLL": "#205d67",
"BUTTON": ("#d8ebb5", "#205d67"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#205d67", "#639a67", "#d9bf77", "#d8ebb5"], "DESCRIPTION": ["Blue", "Green", "Brown", "Vintage"], },
"DarkBlue15": {"BACKGROUND": "#151680", "TEXT": "#f1fea4", "INPUT": "#375fc0", "TEXT_INPUT": "#f1fea4", "SCROLL": "#375fc0",
"BUTTON": ("#f1fea4", "#1c44ac"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#151680", "#1c44ac", "#375fc0", "#f1fea4"], "DESCRIPTION": ["Blue", "Yellow", "Cold"], },
"DarkBlue16": {"BACKGROUND": "#1c44ac", "TEXT": "#f1fea4", "INPUT": "#375fc0", "TEXT_INPUT": "#f1fea4", "SCROLL": "#375fc0",
"BUTTON": ("#f1fea4", "#151680"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#151680", "#1c44ac", "#375fc0", "#f1fea4"], "DESCRIPTION": ["Blue", "Yellow", "Cold"], },
"DarkTeal12": {"BACKGROUND": "#004a7c", "TEXT": "#fafafa", "INPUT": "#e8f1f5", "TEXT_INPUT": "#000000", "SCROLL": "#e8f1f5",
"BUTTON": ("#fafafa", "#005691"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#004a7c", "#005691", "#e8f1f5", "#fafafa"], "DESCRIPTION": ["Grey", "Blue", "Cold", "Winter"], },
"LightBrown13": {"BACKGROUND": "#ebf5ee", "TEXT": "#921224", "INPUT": "#bdc6b8", "TEXT_INPUT": "#921224", "SCROLL": "#921224",
"BUTTON": ("#FFFFFF", "#921224"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#921224", "#bdc6b8", "#bce0da", "#ebf5ee"], "DESCRIPTION": ["Red", "Blue", "Grey", "Vintage", "Wedding"], },
"DarkBlue17": {"BACKGROUND": "#21294c", "TEXT": "#f9f2d7", "INPUT": "#f2dea8", "TEXT_INPUT": "#000000", "SCROLL": "#f2dea8",
"BUTTON": ("#f9f2d7", "#141829"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#141829", "#21294c", "#f2dea8", "#f9f2d7"], "DESCRIPTION": ["#000000", "Blue", "Yellow"], },
"DarkBrown6": {"BACKGROUND": "#785e4d", "TEXT": "#f2eee3", "INPUT": "#baaf92", "TEXT_INPUT": "#000000", "SCROLL": "#baaf92",
"BUTTON": ("#FFFFFF", "#785e4d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#785e4d", "#ff8426", "#baaf92", "#f2eee3"], "DESCRIPTION": ["Grey", "Brown", "Orange", "Autumn"], },
"DarkGreen6": {"BACKGROUND": "#5c715e", "TEXT": "#f2f9f1", "INPUT": "#ddeedf", "TEXT_INPUT": "#000000", "SCROLL": "#ddeedf",
"BUTTON": ("#f2f9f1", "#5c715e"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#5c715e", "#b6cdbd", "#ddeedf", "#f2f9f1"], "DESCRIPTION": ["Grey", "Green", "Vintage"], },
"DarkGreen7": {"BACKGROUND": "#0C231E", "TEXT": "#efbe1c", "INPUT": "#153C33", "TEXT_INPUT": "#efbe1c", "SCROLL": "#153C33",
"BUTTON": ("#efbe1c", "#153C33"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey7": {"BACKGROUND": "#4b586e", "TEXT": "#dddddd", "INPUT": "#574e6d", "TEXT_INPUT": "#dddddd", "SCROLL": "#574e6d",
"BUTTON": ("#dddddd", "#43405d"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#43405d", "#4b586e", "#574e6d", "#dddddd"], "DESCRIPTION": ["Grey", "Winter", "Cold"], },
"DarkRed2": {"BACKGROUND": "#ab1212", "TEXT": "#f6e4b5", "INPUT": "#cd3131", "TEXT_INPUT": "#f6e4b5", "SCROLL": "#cd3131", "BUTTON": ("#f6e4b5", "#ab1212"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#ab1212", "#1fad9f", "#cd3131", "#f6e4b5"], "DESCRIPTION": ["Turquoise", "Red", "Yellow"], },
"LightGrey6": {"BACKGROUND": "#e3e3e3", "TEXT": "#233142", "INPUT": "#455d7a", "TEXT_INPUT": "#e3e3e3", "SCROLL": "#233142",
"BUTTON": ("#e3e3e3", "#455d7a"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0,
"COLOR_LIST": ["#233142", "#455d7a", "#f95959", "#e3e3e3"], "DESCRIPTION": ["#000000", "Blue", "Red", "Grey"], },
"HotDogStand": {"BACKGROUND": "red", "TEXT": "yellow", "INPUT": "yellow", "TEXT_INPUT": "#000000", "SCROLL": "yellow", "BUTTON": ("red", "yellow"),
"PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey8": {"BACKGROUND": "#19232D", "TEXT": "#ffffff", "INPUT": "#32414B", "TEXT_INPUT": "#ffffff", "SCROLL": "#505F69",
"BUTTON": ("#ffffff", "#32414B"), "PROGRESS": ("#505F69", "#32414B"), "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey9": {"BACKGROUND": "#36393F", "TEXT": "#DCDDDE", "INPUT": "#40444B", "TEXT_INPUT": "#ffffff", "SCROLL": "#202225",
"BUTTON": ("#202225", "#B9BBBE"), "PROGRESS": ("#202225", "#40444B"), "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey10": {"BACKGROUND": "#1c1e23", "TEXT": "#cccdcf", "INPUT": "#272a31", "TEXT_INPUT": "#8b9fde", "SCROLL": "#313641",
"BUTTON": ("#f5f5f6", "#2e3d5a"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey11": {"BACKGROUND": "#1c1e23", "TEXT": "#cccdcf", "INPUT": "#313641", "TEXT_INPUT": "#cccdcf", "SCROLL": "#313641",
"BUTTON": ("#f5f5f6", "#313641"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey12": {"BACKGROUND": "#1c1e23", "TEXT": "#8b9fde", "INPUT": "#313641", "TEXT_INPUT": "#8b9fde", "SCROLL": "#313641",
"BUTTON": ("#cccdcf", "#2e3d5a"), "PROGRESS": DEFAULT_PROGRESS_BAR_COMPUTE, "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey13": {"BACKGROUND": "#1c1e23", "TEXT": "#cccdcf", "INPUT": "#272a31", "TEXT_INPUT": "#cccdcf", "SCROLL": "#313641",
"BUTTON": ("#8b9fde", "#313641"), "PROGRESS": ("#cccdcf", "#272a31"), "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkGrey14": {"BACKGROUND": "#24292e", "TEXT": "#fafbfc", "INPUT": "#1d2125", "TEXT_INPUT": "#fafbfc", "SCROLL": "#1d2125",
"BUTTON": ("#fafbfc", "#155398"), "PROGRESS": ("#155398", "#1d2125"), "BORDER": 1, "SLIDER_DEPTH": 0, "PROGRESS_DEPTH": 0, },
"DarkBrown7": {"BACKGROUND": "#2c2417", "TEXT": "#baa379", "INPUT": "#baa379", "TEXT_INPUT": "#000000", "SCROLL": "#392e1c",
"BUTTON": ("#000000", "#baa379"), "PROGRESS": ("#baa379", "#453923"), "BORDER": 1, "SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
"Python": {"BACKGROUND": "#3d7aab", "TEXT": "#ffde56", "INPUT": "#295273", "TEXT_INPUT": "#ffde56", "SCROLL": "#295273", "BUTTON": ("#ffde56", "#295273"),
"PROGRESS": ("#ffde56", "#295273"), "BORDER": 1, "SLIDER_DEPTH": 1, "PROGRESS_DEPTH": 0, },
}
|
|
52,179 | 208,027 | 432 | celery/utils/imports.py | 84 | 18 | def find_module(module, path=None, imp=None):
if imp is None:
imp = import_module
with cwd_in_path():
try:
return imp(module)
except I | Minor refactors, found by static analysis (#7587)
* Remove deprecated methods in `celery.local.Proxy`
* Collapse conditionals for readability
* Remove unused parameter `uuid`
* Remove unused import `ClusterOptions`
* Remove dangerous mutable default argument
Continues work from #5478
* Remove always `None` and unused global variable
* Remove unreachable `elif` block
* Consolidate import statements
* Add missing parameter to `os._exit()`
* Add missing assert statement
* Remove unused global `WindowsError`
* Use `mkstemp` instead of deprecated `mktemp`
* No need for `for..else` constructs in loops that don't break
In these cases where the loop returns or raises instead of breaking, it
is simpler to just put the code that runs after the loop completes right
after the loop instead.
* Use the previously unused parameter `compat_modules`
Previously this parameter was always overwritten by the value of
`COMPAT_MODULES.get(name, ())`, which was very likely unintentional.
* Remove unused local variable `tz`
* Make `assert_received` actually check for `is_received`
Previously, it called `is_accepted`, which was likely a copy-paste
mistake from the `assert_accepted` method.
* Use previously unused `args` and `kwargs` params
Unlike other backends' `__reduce__` methods, the one from `RedisBackend`
simply overwrites `args` and `kwargs` instead of adding to them. This
change makes it more in line with other backends.
* Update celery/backends/filesystem.py
Co-authored-by: Gabriel Soldani <[email protected]>
Co-authored-by: Asif Saif Uddin <[email protected]> | find_module | 59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d | celery | imports.py | 20 | 20 | https://github.com/celery/celery.git | 7 | 105 | 0 | 61 | 185 | Python | {
"docstring": "Version of :func:`imp.find_module` supporting dots.",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | def find_module(module, path=None, imp=None):
if imp is None:
imp = import_module
with cwd_in_path():
try:
return imp(module)
except ImportError:
# Raise a more specific error if the problem is that one of the
# dot-separated segments of the module name is not a package.
if '.' in module:
parts = module.split('.')
for i, part in enumerate(parts[:-1]):
package = '.'.join(parts[:i + 1])
try:
mpart = imp(package)
except ImportError:
# Break out and re-raise the original ImportError
# instead.
break
try:
mpart.__path__
except AttributeError:
raise NotAPackage(package)
raise
|
|
5,326 | 30,117 | 49 | spotdl/utils/ffmpeg.py | 24 | 7 | def get_ffmpeg_path() -> Optional[Path]:
# Check if ffmpeg is installed
global_ffmpeg = shutil.which("ffmpeg")
if global_ffmpeg:
return Path(global_ffmpeg)
| v4 init | get_ffmpeg_path | fa2ad657482aca9dc628e6d7062b8badf2706bb6 | spotify-downloader | ffmpeg.py | 9 | 9 | https://github.com/spotDL/spotify-downloader.git | 2 | 30 | 0 | 20 | 56 | Python | {
"docstring": "\n Get path to global ffmpeg binary or a local ffmpeg binary.\n Or None if not found.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 15
} | def get_ffmpeg_path() -> Optional[Path]:
# Check if ffmpeg is installed
global_ffmpeg = shutil.which("ffmpeg")
if global_ffmpeg:
return Path(global_ffmpeg)
# Get local ffmpeg path
return get_local_ffmpeg()
|
|
81,158 | 273,959 | 44 | keras/layers/rnn/legacy_cell_wrappers.py | 12 | 8 | def __call__(self, inputs, state, scope=None):
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.__call__, scope=scope
)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | __call__ | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | legacy_cell_wrappers.py | 10 | 4 | https://github.com/keras-team/keras.git | 1 | 35 | 0 | 10 | 51 | Python | {
"docstring": "Runs the RNN cell step computation.\n\n We assume that the wrapped RNNCell is being built within its `__call__`\n method. We directly use the wrapped cell's `__call__` in the overridden\n wrapper `__call__` method.\n\n This allows to use the wrapped cell and the non-wrapped cell equivalently\n when using `__call__`.\n\n Args:\n inputs: A tensor with wrapped cell's input.\n state: A tensor or tuple of tensors with wrapped cell's state.\n scope: VariableScope for the subgraph created in the wrapped cells'\n `__call__`.\n\n Returns:\n A pair containing:\n\n - Output: A tensor with cell's output.\n - New state: A tensor or tuple of tensors with new wrapped cell's state.\n ",
"language": "en",
"n_whitespaces": 223,
"n_words": 102,
"vocab_size": 59
} | def __call__(self, inputs, state, scope=None):
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.__call__, scope=scope
)
|
|
84,781 | 284,531 | 462 | openbb_terminal/portfolio/portfolio_model.py | 38 | 20 | def get_kurtosis(self) -> pd.DataFrame:
vals = list()
for period in portfolio_helper.PERIODS:
vals.append(
[
round(
scipy.stats.kurtosis(
portfolio_helper.filter_df_by_period(self.returns, period)
),
3,
),
round(
scipy.stats.skew(
portfolio_helper.filter_df_by_period(
self.benchmark_returns, period
)
),
3,
| Portfolio improvements (#1818)
* improve portfolio controller
* improve menu ux with disabling command when port or bench are not loaded
* allow custom reset with benchmark and portfolio loaded
* bench needs portfolio loaded to use start date, reflect that
* fix tests
* allow to see sum of a portfolio holdings
* add r-square to portfolio
* add skewness of data
* add kurtosis
* add stats
* allow perf command to select a period
* add yearly returns to cumulative return plot
* add individual rolling volatility
* add individual rolling sharpe
* add individual rolling sortino
* add individual rolling beta
* add period to cumulative returns
* clean up on aisle 5
* minor fix
* add volatility, sharpe ratio, sortino ratio and maximum drawdown ratio
* remove duplicated metrics
* check for portfolio and benchmark more modular
* fix tests
* remove sqrt(N) and N from sharpe and sortino calculations
* allow hold to export raw data from tail
* automatically add space before and after table
* add portfolio holdings in percentage
* fix relative dates to be more accurate
* refactor metric command to allow to select a metric of interest and check different periods
* fix cumulative return and implement new yearly return command
* add daily returns graph
* add distribution of daily returns command
* add monthly returns command
* add summary command with multiple metrics for a specific period
* calculate yearly (out)performance
* fix show
* rbeta with benchmark of 1
* improve mret style
* improve title of distribution
* improve volatility
* minor improvement in doc
* improve mret and yret
* tests
* update portfolio content on hugo docs
* fix ycrv hugo docs
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* fix issue
Co-authored-by: Jeroen Bouma <[email protected]> | get_kurtosis | 0e3b62e143c981d81fb46a7e7bb75f93d9159198 | OpenBBTerminal | portfolio_model.py | 17 | 31 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 98 | 0 | 30 | 151 | Python | {
"docstring": "Class method that retrieves kurtosis for portfolio and benchmark selected\n\n Returns\n -------\n pd.DataFrame\n DataFrame with kurtosis for portfolio and benchmark for different periods\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 23,
"vocab_size": 17
} | def get_kurtosis(self) -> pd.DataFrame:
vals = list()
for period in portfolio_helper.PERIODS:
vals.append(
[
round(
scipy.stats.kurtosis(
portfolio_helper.filter_df_by_period(self.returns, period)
),
3,
),
round(
scipy.stats.skew(
portfolio_helper.filter_df_by_period(
self.benchmark_returns, period
)
),
3,
),
]
)
return pd.DataFrame(
vals, index=portfolio_helper.PERIODS, columns=["Portfolio", "Benchmark"]
)
|
|
80,341 | 269,933 | 1,932 | keras/callbacks.py | 230 | 39 | def _save_model(self, epoch, batch, logs):
logs = logs or {}
if (
isinstance(self.save_freq, int)
or self.epochs_since_last_save >= self.period
):
# Block only when saving interval is reached.
logs = tf_utils.sync_to_numpy_or_python_type(logs)
self.epochs_since_last_save = 0
filepath = self._get_file_path(epoch, batch, logs)
try:
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning(
"Can save best model only with %s available, "
"skipping.",
self.monitor,
)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: {self.monitor} improved "
f"from {self.best:.5f} to {current:.5f}, "
f"saving model to {filepath}"
)
self.best = current
if self.save_weights_only:
self.model.save_weights(
filepath,
overwrite=True,
options=self._options,
)
else:
self.model.save(
filepath,
overwrite=True,
options=self._options,
)
else:
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: "
f"{self.monitor} did not improve from {self.best:.5f}"
)
else:
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: saving model to {filepath}"
)
if self.save_weights_only:
self.model.save_weights(
filepath, overwrite=True, options=self._options
)
else:
self.model.save(
filepath, overwrite=True, options=self._options
)
self._maybe_remove_file()
except IsADirectoryError as e: # h5py 3.x
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: {filepath}"
)
except IOError as e: # h5py 2.x
# `e.errno` appears to be `None` so checking t | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _save_model | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | callbacks.py | 25 | 73 | https://github.com/keras-team/keras.git | 15 | 306 | 0 | 123 | 579 | Python | {
"docstring": "Saves the model.\n\n Args:\n epoch: the epoch this iteration is in.\n batch: the batch this iteration is in. `None` if the `save_freq`\n is set to `epoch`.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n ",
"language": "en",
"n_whitespaces": 96,
"n_words": 36,
"vocab_size": 26
} | def _save_model(self, epoch, batch, logs):
logs = logs or {}
if (
isinstance(self.save_freq, int)
or self.epochs_since_last_save >= self.period
):
# Block only when saving interval is reached.
logs = tf_utils.sync_to_numpy_or_python_type(logs)
self.epochs_since_last_save = 0
filepath = self._get_file_path(epoch, batch, logs)
try:
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning(
"Can save best model only with %s available, "
"skipping.",
self.monitor,
)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: {self.monitor} improved "
f"from {self.best:.5f} to {current:.5f}, "
f"saving model to {filepath}"
)
self.best = current
if self.save_weights_only:
self.model.save_weights(
filepath,
overwrite=True,
options=self._options,
)
else:
self.model.save(
filepath,
overwrite=True,
options=self._options,
)
else:
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: "
f"{self.monitor} did not improve from {self.best:.5f}"
)
else:
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: saving model to {filepath}"
)
if self.save_weights_only:
self.model.save_weights(
filepath, overwrite=True, options=self._options
)
else:
self.model.save(
filepath, overwrite=True, options=self._options
)
self._maybe_remove_file()
except IsADirectoryError as e: # h5py 3.x
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: {filepath}"
)
except IOError as e: # h5py 2.x
# `e.errno` appears to be `None` so checking the content of `e.args[0]`.
if "is a directory" in str(e.args[0]).lower():
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: f{filepath}"
)
# Re-throw the error for any other causes.
raise e
|
|
42,302 | 177,172 | 231 | networkx/algorithms/approximation/steinertree.py | 102 | 24 | def steiner_tree(G, terminal_nodes, weight="weight", method=None):
r
if method is None:
import warnings
msg = (
"steiner_tree will change default method from 'kou' to 'mehlhorn'"
| Add Mehlhorn Steiner approximations (#5629)
* Add Wu et al. and Mehlhorn Steiner approximations
* Change default steiner tree approximation method
* Add missing space in error message
* Changes as suggested
* Fix Kou implementation
* Bugfix and variable name change for Mehlhorn
* Add failing test case for Wu Steiner tree
* Add additional valid Steiner tree for test
* Remove Wu et al implementation
* Style change + remove unused code | steiner_tree | 56032abfdff74aebe7e6adbaa711bf4fd6bd7826 | networkx | steinertree.py | 18 | 86 | https://github.com/networkx/networkx.git | 5 | 141 | 0 | 81 | 226 | Python | {
"docstring": "Return an approximation to the minimum Steiner tree of a graph.\n\n The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*)\n is a tree within `G` that spans those nodes and has minimum size (sum of\n edge weights) among all such trees.\n\n The approximation algorithm is specified with the `method` keyword\n argument. All three available algorithms produce a tree whose weight is\n within a (2 - (2 / l)) factor of the weight of the optimal Steiner tree,\n where *l* is the minimum number of leaf nodes across all possible Steiner\n trees.\n\n * `kou` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of\n the subgraph of the metric closure of *G* induced by the terminal nodes,\n where the metric closure of *G* is the complete graph in which each edge is\n weighted by the shortest path distance between the nodes in *G*.\n * `mehlhorn` [3]_ (runtime $O(|E|+|V|\\log|V|)$) modifies Kou et al.'s\n algorithm, beginning by finding the closest terminal node for each\n non-terminal. This data is used to create a complete graph containing only\n the terminal nodes, in which edge is weighted with the shortest path\n distance between them. 
The algorithm then proceeds in the same way as Kou\n et al..\n\n Parameters\n ----------\n G : NetworkX graph\n\n terminal_nodes : list\n A list of terminal nodes for which minimum steiner tree is\n to be found.\n\n weight : string (default = 'weight')\n Use the edge attribute specified by this string as the edge weight.\n Any edge attribute not present defaults to 1.\n\n method : string, optional (default = 'kou')\n The algorithm to use to approximate the Steiner tree.\n Supported options: 'kou', 'mehlhorn'.\n Other inputs produce a ValueError.\n\n Returns\n -------\n NetworkX graph\n Approximation to the minimum steiner tree of `G` induced by\n `terminal_nodes` .\n\n Notes\n -----\n For multigraphs, the edge between two nodes with minimum weight is the\n edge put into the Steiner tree.\n\n\n References\n ----------\n .. [1] Steiner_tree_problem on Wikipedia.\n https://en.wikipedia.org/wiki/Steiner_tree_problem\n .. [2] Kou, L., G. Markowsky, and L. Berman. 1981.\n ‘A Fast Algorithm for Steiner Trees’.\n Acta Informatica 15 (2): 141–45.\n https://doi.org/10.1007/BF00288961.\n .. [3] Mehlhorn, Kurt. 1988.\n ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’.\n Information Processing Letters 27 (3): 125–28.\n https://doi.org/10.1016/0020-0190(88)90066-X.\n ",
"language": "en",
"n_whitespaces": 612,
"n_words": 366,
"vocab_size": 202
} | def steiner_tree(G, terminal_nodes, weight="weight", method=None):
r
if method is None:
import warnings
msg = (
"steiner_tree will change default method from 'kou' to 'mehlhorn'"
"in version 3.2.\nSet the `method` kwarg to remove this warning."
)
warnings.warn(msg, FutureWarning, stacklevel=4)
method = "kou"
try:
algo = ALGORITHMS[method]
except KeyError as e:
msg = f"{method} is not a valid choice for an algorithm."
raise ValueError(msg) from e
edges = algo(G, terminal_nodes, weight)
# For multigraph we should add the minimal weight edge keys
if G.is_multigraph():
edges = (
(u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges
)
T = G.edge_subgraph(edges)
return T
|
|
15,916 | 72,955 | 35 | wagtail/api/v2/views.py | 10 | 7 | def find_object(self, queryset, request):
if "id" in request.GET:
| Reformat with black | find_object | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | views.py | 12 | 3 | https://github.com/wagtail/wagtail.git | 2 | 31 | 0 | 10 | 53 | Python | {
"docstring": "\n Override this to implement more find methods.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def find_object(self, queryset, request):
if "id" in request.GET:
return queryset.get(id=request.GET["id"])
|
|
39,421 | 163,372 | 57 | pandas/core/dtypes/cast.py | 21 | 8 | def _maybe_infer_dtype_type(element):
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = | CLN: assorted, privatize, easy issues (#45305) | maybe_infer_dtype_type | 5ba7d714014ae8feaccc0dd4a98890828cf2832d | pandas | cast.py | 11 | 8 | https://github.com/pandas-dev/pandas.git | 3 | 43 | 0 | 14 | 74 | Python | {
"docstring": "\n Try to infer an object's dtype, for use in arithmetic ops.\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Possibly has a `.dtype` attribute, and possibly the iterator\n protocol.\n\n Returns\n -------\n tipo : type\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Foo = namedtuple(\"Foo\", \"dtype\")\n >>> _maybe_infer_dtype_type(Foo(np.dtype(\"i8\")))\n dtype('int64')\n ",
"language": "en",
"n_whitespaces": 136,
"n_words": 70,
"vocab_size": 59
} | def _maybe_infer_dtype_type(element):
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
|
|
108,472 | 309,776 | 107 | tests/components/alexa/test_smart_home.py | 59 | 11 | def test_create_api_message_special():
request = get_new_request("Alexa.PowerController", "TurnOn")
directive_header = request["directive"]["header"]
directive_header.pop("correlationToken")
directive = messages.AlexaDirective(request)
msg = directive.response("testName", "testNameSpace")._response
assert "event" in msg
msg = msg["event"]
assert msg["header"]["messageId"] is not None
assert msg["header"]["messageId"] != directive_header["messageId"]
assert "correlationToken" not in msg["header"]
assert msg["header"]["name"] == "testName"
assert msg["header"]["namespace"] == "testNameSpace"
assert msg["header"]["payloadVersion"] == "3"
assert msg["payload"] == {}
assert "endpoint" not in msg
| Fix comments in Alexa (#64289) | test_create_api_message_special | c109d59862d1e2e28e54160ee75f9465771e99eb | core | test_smart_home.py | 10 | 16 | https://github.com/home-assistant/core.git | 1 | 133 | 0 | 36 | 252 | Python | {
"docstring": "Create an API message response of a request with non defaults.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_create_api_message_special():
request = get_new_request("Alexa.PowerController", "TurnOn")
directive_header = request["directive"]["header"]
directive_header.pop("correlationToken")
directive = messages.AlexaDirective(request)
msg = directive.response("testName", "testNameSpace")._response
assert "event" in msg
msg = msg["event"]
assert msg["header"]["messageId"] is not None
assert msg["header"]["messageId"] != directive_header["messageId"]
assert "correlationToken" not in msg["header"]
assert msg["header"]["name"] == "testName"
assert msg["header"]["namespace"] == "testNameSpace"
assert msg["header"]["payloadVersion"] == "3"
assert msg["payload"] == {}
assert "endpoint" not in msg
|
|
6,012 | 32,880 | 43 | tests/mixed_int8/test_mixed_int8.py | 9 | 9 | def tearDown(self):
r
del self.model_fp16
del self.model_8bit
gc.collect()
torch.c | `bitsandbytes` - `Linear8bitLt` integration into `transformers` models (#17901)
* first commit
* correct replace function
* add final changes
- works like charm!
- cannot implement tests yet
- tested
* clean up a bit
* add bitsandbytes dependencies
* working version
- added import function
- added bitsandbytes utils file
* small fix
* small fix
- fix import issue
* fix import issues
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <[email protected]>
* refactor a bit
- move bitsandbytes utils to utils
- change comments on functions
* reformat docstring
- reformat docstring on init_empty_weights_8bit
* Update src/transformers/__init__.py
Co-authored-by: Sylvain Gugger <[email protected]>
* revert bad formatting
* change to bitsandbytes
* refactor a bit
- remove init8bit since it is useless
* more refactoring
- fixed init empty weights issue
- added threshold param
* small hack to make it work
* Update src/transformers/modeling_utils.py
* Update src/transformers/modeling_utils.py
* revmoe the small hack
* modify utils file
* make style + refactor a bit
* create correctly device map
* add correct dtype for device map creation
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <[email protected]>
* apply suggestions
- remove with torch.grad
- do not rely on Python bool magic!
* add docstring
- add docstring for new kwargs
* add docstring
- comment `replace_8bit_linear` function
- fix weird formatting
* - added more documentation
- added new utility function for memory footprint tracking
- colab demo to add
* few modifs
- typo doc
- force cast into float16 when load_in_8bit is enabled
* added colab link
* add test architecture + docstring a bit
* refactor a bit testing class
* make style + refactor a bit
* enhance checks
- add more checks
- start writing saving test
* clean up a bit
* male style
* add more details on doc
* add more tests
- still needs to fix 2 tests
* replace by "or"
- could not fix it from GitHub GUI
Co-authored-by: Sylvain Gugger <[email protected]>
* refactor a bit testing code + add readme
* make style
* fix import issue
* Update src/transformers/modeling_utils.py
Co-authored-by: Michael Benayoun <[email protected]>
* add few comments
* add more doctring + make style
* more docstring
* raise error when loaded in 8bit
* make style
* add warning if loaded on CPU
* add small sanity check
* fix small comment
* add bitsandbytes on dockerfile
* Improve documentation
- improve documentation from comments
* add few comments
* slow tests pass on the VM but not on the CI VM
* Fix merge conflict
* make style
* another test should pass on a multi gpu setup
* fix bad import in testing file
* Fix slow tests
- remove dummy batches
- no more CUDA illegal memory errors
* odify dockerfile
* Update docs/source/en/main_classes/model.mdx
* Update Dockerfile
* Update model.mdx
* Update Dockerfile
* Apply suggestions from code review
* few modifications
- lm head can stay on disk/cpu
- change model name so that test pass
* change test value
- change test value to the correct output
- torch bmm changed to baddmm in bloom modeling when merging
* modify installation guidelines
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <[email protected]>
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <[email protected]>
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <[email protected]>
* replace `n`by `name`
* merge `load_in_8bit` and `low_cpu_mem_usage`
* first try - keep the lm head in full precision
* better check
- check the attribute `base_model_prefix` instead of computing the number of parameters
* added more tests
* Update src/transformers/utils/bitsandbytes.py
Co-authored-by: Sylvain Gugger <[email protected]>
* Merge branch 'integration-8bit' of https://github.com/younesbelkada/transformers into integration-8bit
* improve documentation
- fix typos for installation
- change title in the documentation
Co-authored-by: Sylvain Gugger <[email protected]>
Co-authored-by: Michael Benayoun <[email protected]> | tearDown | 4a51075a96d2049f368b5f3dd6c0e9f08f599b62 | transformers | test_mixed_int8.py | 8 | 9 | https://github.com/huggingface/transformers.git | 1 | 27 | 0 | 8 | 46 | Python | {
"docstring": "\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 27,
"vocab_size": 24
} | def tearDown(self):
r
del self.model_fp16
del self.model_8bit
gc.collect()
torch.cuda.empty_cache()
|
|
71,142 | 246,307 | 681 | tests/rest/client/test_relations.py | 226 | 32 | def test_pagination_from_sync_and_messages(self):
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
self.assertEquals(200, channel.code, channel.json_body)
annotation_id = channel.json_body["event_id"]
# Send an event after the relation events.
self.helper.send(self.room, body="Latest event", tok=self.user_token)
# Request /sync, limiting it such that only the latest event is returned
# (and not the relation).
filter = urllib.parse.quote_plus(
'{"room": {"timeline": {"limit": 1}}}'.encode()
)
channel = self.make_request(
"GET", f"/sync?filter={filter}", access_token=self.user_token
)
self.assertEquals(200, channel.code, channel.json_body)
room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
sync_prev_batch = room_timeline["prev_batch"]
self.assertIsNotNone(sync_prev_batch)
# Ensure the relation event is not in the batch returned from /sync.
self.assertNotIn(
annotation_id, [ev["event_id"] for ev in room_timeline["events"]]
)
# Request /messages, limiting it such that only the latest event is
# returned (and not the relation).
channel = self.make_request(
"GET",
f"/rooms/{self.room}/messages?dir=b&limit=1",
access_token=self.user_token,
)
self.assertEquals(200, channel.code, channel.json_body)
messages_end = channel.json_body["end"]
self.assertIsNotNone(messages_end)
# Ensure the relation event is not in the chunk returned from /messages.
self.assertNotIn(
annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
)
# Request /relations with the pagination tokens received from both the
# /sync and /messages responses above, in turn.
#
# This is a tiny bit silly | Support pagination tokens from /sync and /messages in the relations API. (#11952) | test_pagination_from_sync_and_messages | df36945ff0e4a293a9dac0da07e2c94256835b32 | synapse | test_relations.py | 13 | 39 | https://github.com/matrix-org/synapse.git | 5 | 289 | 0 | 111 | 505 | Python | {
"docstring": "Pagination tokens from /sync and /messages can be used to paginate /relations.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_pagination_from_sync_and_messages(self):
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
self.assertEquals(200, channel.code, channel.json_body)
annotation_id = channel.json_body["event_id"]
# Send an event after the relation events.
self.helper.send(self.room, body="Latest event", tok=self.user_token)
# Request /sync, limiting it such that only the latest event is returned
# (and not the relation).
filter = urllib.parse.quote_plus(
'{"room": {"timeline": {"limit": 1}}}'.encode()
)
channel = self.make_request(
"GET", f"/sync?filter={filter}", access_token=self.user_token
)
self.assertEquals(200, channel.code, channel.json_body)
room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
sync_prev_batch = room_timeline["prev_batch"]
self.assertIsNotNone(sync_prev_batch)
# Ensure the relation event is not in the batch returned from /sync.
self.assertNotIn(
annotation_id, [ev["event_id"] for ev in room_timeline["events"]]
)
# Request /messages, limiting it such that only the latest event is
# returned (and not the relation).
channel = self.make_request(
"GET",
f"/rooms/{self.room}/messages?dir=b&limit=1",
access_token=self.user_token,
)
self.assertEquals(200, channel.code, channel.json_body)
messages_end = channel.json_body["end"]
self.assertIsNotNone(messages_end)
# Ensure the relation event is not in the chunk returned from /messages.
self.assertNotIn(
annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
)
# Request /relations with the pagination tokens received from both the
# /sync and /messages responses above, in turn.
#
# This is a tiny bit silly since the client wouldn't know the parent ID
# from the requests above; consider the parent ID to be known from a
# previous /sync.
for from_token in (sync_prev_batch, messages_end):
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}",
access_token=self.user_token,
)
self.assertEquals(200, channel.code, channel.json_body)
# The relation should be in the returned chunk.
self.assertIn(
annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
)
|
|
22,025 | 104,910 | 31 | src/datasets/utils/streaming_download_manager.py | 10 | 6 | def download(self, url_or_urls):
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
| Add API code examples for Builder classes (#4313)
* 📝 add examples for builder classes
* 📝 apply quentin review | download | d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd | datasets | streaming_download_manager.py | 9 | 3 | https://github.com/huggingface/datasets.git | 1 | 24 | 0 | 9 | 38 | Python | {
"docstring": "Download given url(s).\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. Each\n url is a `str`.\n\n Returns:\n downloaded_path(s): `str`, The downloaded paths matching the given input\n url_or_urls.\n\n Example:\n\n ```py\n >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')\n ```\n ",
"language": "en",
"n_whitespaces": 138,
"n_words": 37,
"vocab_size": 35
} | def download(self, url_or_urls):
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
|
|
1,652 | 9,673 | 243 | reconstruction/ostec/utils/generate_heatmap.py | 148 | 20 | def draw_gaussian(image, point, sigma):
# Check if the gaussian is inside
point[0] = round(point[0], 2)
point[1] = round(point[1], 2)
ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)]
br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)]
if (ul[0] > image.shape[1] or ul[1] >
image.shape[0] or br[0] < 1 or br[1] < 1):
return image
size = 15 * sigma + 1
g = _ga | Improved landmark differentiability by heatmaps. | draw_gaussian | 2a8b181d4ddfc542d0784b8ea7341f09500ff299 | insightface | generate_heatmap.py | 15 | 21 | https://github.com/deepinsight/insightface.git | 6 | 469 | 0 | 86 | 667 | Python | {
"docstring": " Draw gaussian circle at a point in an image.\n\n Args:\n image (np.array): An image of shape (H, W)\n point (np.array): The center point of the guassian circle\n sigma (float): Standard deviation of the gaussian kernel\n\n Returns:\n np.array: The image with the drawn gaussian.\n ",
"language": "en",
"n_whitespaces": 81,
"n_words": 43,
"vocab_size": 31
} | def draw_gaussian(image, point, sigma):
# Check if the gaussian is inside
point[0] = round(point[0], 2)
point[1] = round(point[1], 2)
ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)]
br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)]
if (ul[0] > image.shape[1] or ul[1] >
image.shape[0] or br[0] < 1 or br[1] < 1):
return image
size = 15 * sigma + 1
g = _gaussian(size, sigma=0.1)
g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) -
int(max(1, ul[0])) + int(max(1, -ul[0]))]
g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) -
int(max(1, ul[1])) + int(max(1, -ul[1]))]
img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]
img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]
assert (g_x[0] > 0 and g_y[1] > 0)
image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] = \
image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]
image[image > 1] = 1
return image
# Adapted from: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/api.py |
|
56,422 | 221,530 | 54 | python3.10.4/Lib/collections/__init__.py | 15 | 4 | def setdefault(self, key, default=None):
if key in self:
return self[key]
self[key] = default
return default
| add python 3.10.4 for windows | setdefault | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | __init__.py | 8 | 5 | https://github.com/XX-net/XX-Net.git | 2 | 30 | 0 | 12 | 47 | Python | {
"docstring": "Insert key with a value of default if key is not in the dictionary.\n\n Return the value for key if key is in the dictionary, else default.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 27,
"vocab_size": 18
} | def setdefault(self, key, default=None):
if key in self:
return self[key]
self[key] = default
return default
|
|
40,398 | 169,203 | 28 | web/pandas_web.py | 7 | 5 | def current_year(context):
context["current_year"] = datetime. | WEB: Add new footer to web (#48557) | current_year | bbf17ea692e437cec908eae6759ffff8092fb42e | pandas | pandas_web.py | 10 | 3 | https://github.com/pandas-dev/pandas.git | 1 | 22 | 0 | 7 | 40 | Python | {
"docstring": "\n Add the current year to the context, so it can be used for the copyright\n note, or other places where it is needed.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 20
} | def current_year(context):
context["current_year"] = datetime.datetime.now().year
return context
|
|
505 | 3,627 | 98 | airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py | 37 | 12 | def fileformatparser_map(self) -> Mapping[str, type]:
return {
"csv": CsvParser,
"parquet": ParquetParser,
}
# TODO: make these user configurable in spec.json
ab_additional_col = "_ab_additional_properties"
ab_last_mod_col = "_ab_source_file_last_modified"
ab_file_name_col = "_ab_source_file_url"
airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]
| 🐛 Source S3: Loading of files' metadata (#8252) | fileformatparser_map | 91eff1dffdb04be968b6ee4ef8d8bbfeb2e882d0 | airbyte | stream.py | 8 | 6 | https://github.com/airbytehq/airbyte.git | 1 | 24 | 0 | 33 | 82 | Python | {
"docstring": "Mapping where every key is equal 'filetype' and values are corresponding parser classes.",
"language": "en",
"n_whitespaces": 15,
"n_words": 13,
"vocab_size": 13
} | def fileformatparser_map(self) -> Mapping[str, type]:
return {
"csv": CsvParser,
"parquet": ParquetParser,
}
# TODO: make these user configurable in spec.json
ab_additional_col = "_ab_additional_properties"
ab_last_mod_col = "_ab_source_file_last_modified"
ab_file_name_col = "_ab_source_file_url"
airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]
datetime_format_string = "%Y-%m-%dT%H:%M:%S%z"
|
|
36,553 | 156,094 | 57 | dask/dataframe/core.py | 18 | 9 | def pivot_table(self, index=None, columns=None, values=None, aggfunc="mean"):
from dask.dataframe.reshape import pivot_table
return pivot_table(
self, index=index, columns=colum | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | pivot_table | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | core.py | 8 | 5 | https://github.com/dask/dask.git | 1 | 51 | 0 | 18 | 74 | Python | {
"docstring": "\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.\n\n Parameters\n ----------\n values : scalar\n column to aggregate\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n aggfunc : {'mean', 'sum', 'count'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n ",
"language": "en",
"n_whitespaces": 186,
"n_words": 61,
"vocab_size": 43
} | def pivot_table(self, index=None, columns=None, values=None, aggfunc="mean"):
from dask.dataframe.reshape import pivot_table
return pivot_table(
self, index=index, columns=columns, values=values, aggfunc=aggfunc
)
|
|
31,314 | 138,092 | 141 | python/ray/tune/tests/test_actor_reuse.py | 42 | 19 | def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2"
register_trainable("foo2", MyResettableClass)
[trial1, trial2, trial3, trial4] = tune.run(
"foo2",
config={
"fail": tune.grid_search([False, True, False, False]),
"id": -1,
"sleep": 2,
},
reuse_actors=True,
resources_per_trial={"cpu": 2},
raise_on_failed_trial=False,
).trials
assert trial1.last_result["num_resets"] == 0
assert trial3.last_result["num_resets | [air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016)
Includes/depends on #30777
TLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager.
Ray Tune's resource management currently uses a tightly coupled placement group manager. This leads to a number of shortcomings:
- The tight coupling on the manager side (e.g. PG manager keeps track of trials) prevents re-usability
- The tight coupling on the trial executor side prevents using different resource management strategies (e.g. shared or budget-based)
- It's hard to test independently. Tests for the resource management require a simulated tune setup.
To improve stability, extensibility, and maintainability, this PR moves the resource management logic into a central `ray.air.execution.resources` subpackage. The resource management has a simple API that works with `ResourceRequest`s and `AllocatedResources` to manage requested and assigned resources, respectively. The actual resource management can then be anything - per default it is a placement group based manager, but this PR also introduces a PoC budget-based manager that can be plugged in.
The PR does not substantially change existing tests, so we can be certain that the new resource model is a fully compatible replacement for the old placement group manager.
Signed-off-by: Kai Fricke <[email protected]> | test_multi_trial_reuse_with_failing | 1510fb2cd631b2776092fb45ee4082e5e65f16f8 | ray | test_actor_reuse.py | 15 | 17 | https://github.com/ray-project/ray.git | 1 | 113 | 0 | 36 | 183 | Python | {
"docstring": "Test that failing trial's actors are not reused.\n\n - 2 trials can run at the same time\n - Trial 1 succeeds, trial 2 fails\n - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor\n - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 52,
"vocab_size": 34
} | def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2"
register_trainable("foo2", MyResettableClass)
[trial1, trial2, trial3, trial4] = tune.run(
"foo2",
config={
"fail": tune.grid_search([False, True, False, False]),
"id": -1,
"sleep": 2,
},
reuse_actors=True,
resources_per_trial={"cpu": 2},
raise_on_failed_trial=False,
).trials
assert trial1.last_result["num_resets"] == 0
assert trial3.last_result["num_resets"] == 0
assert trial4.last_result["num_resets"] == 1
|
|
15,938 | 73,067 | 114 | wagtail/contrib/forms/views.py | 32 | 24 | def dispatch(self, request, *args, **kwargs):
page_id = kwargs.get("page_id")
if not get_forms_for_user(self.request.user).filter(id=page_id).exists():
raise PermissionDenied
self.page = get_object_or_404(Page, id=page_id).specific
self.submissions = self.get_queryset()
if self.request.method == "POST":
self.handle_delete(self.submissions)
return redirect(self.get_success_url(), page_id)
return super().dispatch(request, *args, * | Reformat with black | dispatch | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | views.py | 14 | 10 | https://github.com/wagtail/wagtail.git | 3 | 112 | 0 | 27 | 182 | Python | {
"docstring": "Check permissions, set the page and submissions, handle delete",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def dispatch(self, request, *args, **kwargs):
page_id = kwargs.get("page_id")
if not get_forms_for_user(self.request.user).filter(id=page_id).exists():
raise PermissionDenied
self.page = get_object_or_404(Page, id=page_id).specific
self.submissions = self.get_queryset()
if self.request.method == "POST":
self.handle_delete(self.submissions)
return redirect(self.get_success_url(), page_id)
return super().dispatch(request, *args, **kwargs)
|
|
36,901 | 157,358 | 72 | ldm/models/diffusion/ddpm.py | 30 | 23 | def _prior_bpd(self, x_start):
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_f | release more models | _prior_bpd | ca86da3a30c4e080d4db8c25fca73de843663cb4 | stablediffusion | ddpm.py | 12 | 6 | https://github.com/Stability-AI/stablediffusion.git | 1 | 90 | 0 | 27 | 127 | Python | {
"docstring": "\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n ",
"language": "en",
"n_whitespaces": 91,
"n_words": 48,
"vocab_size": 40
} | def _prior_bpd(self, x_start):
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
|
|
39,660 | 165,326 | 39 | pandas/tests/window/test_rolling.py | 27 | 9 | def test_rolling_non_monotonic(method, expected):
# Based on an example found in computation.rst
use_expanding = [True, False, True, False, True, True, True, True]
df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
| ENH: Rolling window with step size (GH-15354) (#45765) | test_rolling_non_monotonic | 6caefb19f4d7c05451fafca182c6eb39fe9901ed | pandas | test_rolling.py | 15 | 9 | https://github.com/pandas-dev/pandas.git | 1 | 100 | 0 | 22 | 72 | Python | {
"docstring": "\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. Output of sum/mean has\n manually been verified.\n\n GH 36933.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 32,
"vocab_size": 29
} | def test_rolling_non_monotonic(method, expected):
# Based on an example found in computation.rst
use_expanding = [True, False, True, False, True, True, True, True]
df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
|
|
36,756 | 156,746 | 33 | dask/array/core.py | 12 | 7 | def clip(self, min=None, max=None):
from dask.array.ufunc import clip
return cl | Don't include docs in ``Array`` methods, just refer to module docs (#9244)
Co-authored-by: James Bourbeau <[email protected]> | clip | 2820bae493a49cb1d0a6e376985c5473b8f04fa8 | dask | core.py | 7 | 3 | https://github.com/dask/dask.git | 1 | 31 | 0 | 11 | 46 | Python | {
"docstring": "Return an array whose values are limited to ``[min, max]``.\n One of max or min must be given.\n\n Refer to :func:`dask.array.clip` for full documentation.\n\n See Also\n --------\n dask.array.clip : equivalent function\n ",
"language": "en",
"n_whitespaces": 73,
"n_words": 31,
"vocab_size": 30
} | def clip(self, min=None, max=None):
from dask.array.ufunc import clip
return clip(self, min, max)
|
|
77,311 | 262,675 | 105 | TTS/tts/layers/overflow/common_layers.py | 34 | 12 | def _floor_std(self, std):
r
origi | Adding OverFlow (#2183)
* Adding encoder
* currently modifying hmm
* Adding hmm
* Adding overflow
* Adding overflow setting up flat start
* Removing runs
* adding normalization parameters
* Fixing models on same device
* Training overflow and plotting evaluations
* Adding inference
* At the end of epoch the test sentences are coming on cpu instead of gpu
* Adding figures from model during training to monitor
* reverting tacotron2 training recipe
* fixing inference on gpu for test sentences on config
* moving helpers and texts within overflows source code
* renaming to overflow
* moving loss to the model file
* Fixing the rename
* Model training but not plotting the test config sentences's audios
* Formatting logs
* Changing model name to camelcase
* Fixing test log
* Fixing plotting bug
* Adding some tests
* Adding more tests to overflow
* Adding all tests for overflow
* making changes to camel case in config
* Adding information about parameters and docstring
* removing compute_mel_statistics moved statistic computation to the model instead
* Added overflow in readme
* Adding more test cases, now it doesn't saves transition_p like tensor and can be dumped as json | _floor_std | 3b8b105b0d6539ac12972de94e0b2a5077fa1ce2 | TTS | common_layers.py | 10 | 16 | https://github.com/coqui-ai/TTS.git | 2 | 50 | 0 | 31 | 83 | Python | {
"docstring": "\n It clamps the standard deviation to not to go below some level\n This removes the problem when the model tries to cheat for higher likelihoods by converting\n one of the gaussians to a point mass.\n\n Args:\n std (float Tensor): tensor containing the standard deviation to be\n ",
"language": "en",
"n_whitespaces": 93,
"n_words": 46,
"vocab_size": 36
} | def _floor_std(self, std):
r
original_tensor = std.clone().detach()
std = torch.clamp(std, min=self.std_floor)
if torch.any(original_tensor != std):
print(
"[*] Standard deviation was floored! The model is preventing overfitting, nothing serious to worry about"
)
return std
|
|
13,247 | 63,314 | 1,003 | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 175 | 29 | def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
nl = "\n"
out = []
namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [nl, indent, "<", selfTag, ">"]
for i, res in enumerate(self.__toklist):
if isinstance(res, ParseResults):
if i in namedItems:
out += [res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedIt | upd; format | asXML | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | pyparsing.py | 18 | 49 | https://github.com/jindongwang/transferlearning.git | 16 | 278 | 0 | 83 | 454 | Python | {
"docstring": "\n (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 19,
"vocab_size": 18
} | def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
nl = "\n"
out = []
namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [nl, indent, "<", selfTag, ">"]
for i, res in enumerate(self.__toklist):
if isinstance(res, ParseResults):
if i in namedItems:
out += [res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">"]
out += [nl, indent, "</", selfTag, ">"]
return "".join(out)
|
|
19,283 | 96,149 | 22 | src/sentry/models/group.py | 8 | 5 | def times_seen_with_pending(self) -> int: | fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624)
In `post_process_group` we process issue alert rules and also ignored groups. Both of these can have
conditions that read from the `times_seen` value on the `Group`.
The problem here is that updates to `times_seen` are buffered and only written every 45s or so. This
means that most of the time when a `Group` goes through `post_process_group` it has an out of date
`times_seen` value. For infrequently updated groups, this can just mean that the count is -1. But
for high volume groups this could mean that we're considerably below the count.
To improve this, we read the current value from buffers and store it as pending updates on the group.
We then use this pending value when checking rules and snoozes in post process. There's a potential
race condition here where we fetch the `Group`, and before we fetch the value from buffers it is
cleared, and so we miss out on the update. This should be infrequent enough that it's not a problem,
and either way we will be considerably more accurate most of the time. | times_seen_with_pending | 09726d7fc95e53bb516e328fc1811fc9a0704cac | sentry | group.py | 7 | 6 | https://github.com/getsentry/sentry.git | 1 | 16 | 0 | 8 | 28 | Python | {
"docstring": "\n Returns `times_seen` with any additional pending updates from `buffers` added on. This value\n must be set first.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 17
} | def times_seen_with_pending(self) -> int:
return self.times_seen + self.times_seen_pending
|
|
117,005 | 319,841 | 169 | src/documents/tests/test_api.py | 22 | 14 | def test_api_create_storage_path(self):
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"name": "A storage path",
"path": "Somewhere/{asn}",
},
),
content_ | Adds invalid storage path format test | test_api_create_storage_path | d7f7d839f8a6b7d0378dda1e0744739748d71b9c | paperless-ngx | test_api.py | 13 | 13 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 64 | 0 | 22 | 108 | Python | {
"docstring": "\n GIVEN:\n - API request to create a storage paths\n WHEN:\n - API is called\n THEN:\n - Correct HTTP response\n - New storage path is created\n ",
"language": "en",
"n_whitespaces": 98,
"n_words": 25,
"vocab_size": 19
} | def test_api_create_storage_path(self):
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"name": "A storage path",
"path": "Somewhere/{asn}",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, 201)
self.assertEqual(StoragePath.objects.count(), 2)
|
|
@pytest.mark.django_db | 17,258 | 81,780 | 374 | awx/main/tests/functional/models/test_workflow.py | 63 | 31 | def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
inventory=inventory.id,
job_tags='',
skip_tags='',
ask_inventory_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
ask_scm_branch_on_launch=True,
| adding prompt-to-launch field on Labels field in Workflow Templates; with necessary UI and testing changes
Co-authored-by: Keith Grant <[email protected]> | test_set_all_ask_for_prompts_true_from_post | 663ef2cc6413c0cdb26392bb046b37fe564fb546 | awx | test_workflow.py | 13 | 28 | https://github.com/ansible/awx.git | 1 | 151 | 1 | 44 | 234 | Python | {
"docstring": "\n Tests behaviour and values of ask_for_* fields on WFJT via POST\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
inventory=inventory.id,
job_tags='',
skip_tags='',
ask_inventory_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
ask_scm_branch_on_launch=True,
ask_skip_tags_on_launch=True,
ask_tags_on_launch=True,
ask_variables_on_launch=True,
),
user=org_admin,
expect=201,
)
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.ask_inventory_on_launch is True
assert wfjt.ask_labels_on_launch is True
assert wfjt.ask_limit_on_launch is True
assert wfjt.ask_scm_branch_on_launch is True
assert wfjt.ask_skip_tags_on_launch is True
assert wfjt.ask_tags_on_launch is True
assert wfjt.ask_variables_on_launch is True
@pytest.mark.django_db |
28,779 | 128,697 | 128 | python/ray/_private/utils.py | 60 | 15 | def get_used_memory():
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.stat"
# For cgroups v2:
memory_usage_filename | [core] update cgroup v1 memory usage calculation to ignore inactive (cache) files (#29103)
Signed-off-by: Clarence Ng [email protected]
Adjust used memory calculation for cgroup v1, to make it inline with how working set memory is calculated, which is what the cgroup oom killer uses. Before this change we include the rss and cache, and not discount the inactive / purgeable cache content. When we write to disk or object store it generates a large amount of page cache. If we don't discount this cache content it will result in over-counting, and hence trigger the ray oom killer earlier than what it should be. | get_used_memory | 036225dec2d1f0d895043ca5f0aeeff377aa7fc7 | ray | utils.py | 15 | 12 | https://github.com/ray-project/ray.git | 4 | 76 | 0 | 44 | 139 | Python | {
"docstring": "Return the currently used system memory in bytes\n\n Returns:\n The total amount of used memory\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 15,
"vocab_size": 13
} | def get_used_memory():
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.stat"
# For cgroups v2:
memory_usage_filename_v2 = "/sys/fs/cgroup/memory.current"
if os.path.exists(memory_usage_filename):
docker_usage = get_cgroupv1_used_memory(memory_usage_filename)
elif os.path.exists(memory_usage_filename_v2):
with open(memory_usage_filename_v2, "r") as f:
docker_usage = int(f.read())
if docker_usage is not None:
return docker_usage
return psutil.virtual_memory().used
|
|
80,703 | 271,128 | 161 | keras/engine/data_adapter.py | 71 | 7 | def pack_x_y_sample_weight(x, y=None, sample_weight=None):
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unnecessary tuple
if not tf.nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | pack_x_y_sample_weight | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | data_adapter.py | 11 | 10 | https://github.com/keras-team/keras.git | 4 | 60 | 0 | 53 | 97 | Python | {
"docstring": "Packs user-provided data into a tuple.\n\n This is a convenience utility for packing data into the tuple formats\n that `Model.fit` uses.\n\n Standalone usage:\n\n >>> x = tf.ones((10, 1))\n >>> data = tf.keras.utils.pack_x_y_sample_weight(x)\n >>> isinstance(data, tf.Tensor)\n True\n >>> y = tf.ones((10, 1))\n >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)\n >>> isinstance(data, tuple)\n True\n >>> x, y = data\n\n Args:\n x: Features to pass to `Model`.\n y: Ground-truth targets to pass to `Model`.\n sample_weight: Sample weight for each element.\n\n Returns:\n Tuple in the format used in `Model.fit`.\n ",
"language": "en",
"n_whitespaces": 148,
"n_words": 83,
"vocab_size": 54
} | def pack_x_y_sample_weight(x, y=None, sample_weight=None):
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unnecessary tuple
if not tf.nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
|
|
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) | 76,232 | 260,408 | 209 | sklearn/linear_model/_glm/tests/test_glm.py | 91 | 40 | def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha,
fit | TST tight tests for GLMs (#23619)
Co-authored-by: Olivier Grisel <[email protected]> | test_glm_regression_vstacked_X | 9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f | scikit-learn | test_glm.py | 11 | 25 | https://github.com/scikit-learn/scikit-learn.git | 2 | 188 | 1 | 71 | 320 | Python | {
"docstring": "Test that GLM converges for all solvers to correct solution on vstacked data.\n\n We work with a simple constructed data set with known solution.\n Fit on [X] with alpha is the same as fit on [X], [y]\n [X], [y] with 1 * alpha.\n It is the same alpha as the average loss stays the same.\n For wide X, [X', X'] is a singular matrix.\n ",
"language": "en",
"n_whitespaces": 126,
"n_words": 64,
"vocab_size": 48
} | def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs available
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
model.fit(X, y)
rtol = 3e-5
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) |
48,348 | 197,115 | 41 | sympy/tensor/tensor.py | 8 | 5 | def deprecate_data():
sympy_deprecation_warning(
,
| Update the various tensor deprecations | deprecate_data | cba899d4137b0b65f6850120ee42cd4fcd4f9dbf | sympy | tensor.py | 9 | 10 | https://github.com/sympy/sympy.git | 1 | 21 | 0 | 8 | 37 | Python | {
"docstring": "\n The data attribute of TensorIndexType is deprecated. Use The\n replace_with_arrays() method instead.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 11
} | def deprecate_data():
sympy_deprecation_warning(
,
deprecated_since_version="1.4",
active_deprecations_target="deprecated-tensorindextype-attrs",
stacklevel=4,
)
|
|
76,453 | 260,743 | 69 | sklearn/preprocessing/_function_transformer.py | 23 | 11 | def fit(self, X, y=None):
sel | MAINT Add parameter validation for `FunctionTransformer`. (#24180)
Co-authored-by: Jérémie du Boisberranger <[email protected]> | fit | b85f799d0a7242aace8bffd5c8fd7cf3585340af | scikit-learn | _function_transformer.py | 11 | 6 | https://github.com/scikit-learn/scikit-learn.git | 4 | 57 | 0 | 22 | 91 | Python | {
"docstring": "Fit transformer by checking X.\n\n If ``validate`` is ``True``, ``X`` will be checked.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input array.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n FunctionTransformer class instance.\n ",
"language": "en",
"n_whitespaces": 139,
"n_words": 43,
"vocab_size": 40
} | def fit(self, X, y=None):
self._validate_params()
X = self._check_input(X, reset=True)
if self.check_inverse and not (self.func is None or self.inverse_func is None):
self._check_inverse_transform(X)
return self
|
|
48,967 | 198,505 | 47 | sympy/printing/dot.py | 17 | 9 | def styleof(expr, styles=default_styles):
style = {}
for typ, sty in styles:
if isinstance(expr, typ):
style | Code cleanup | styleof | 9d58006fc0a23afcba38f641c9472917c436428a | sympy | dot.py | 11 | 6 | https://github.com/sympy/sympy.git | 3 | 37 | 0 | 16 | 60 | Python | {
"docstring": " Merge style dictionaries in order\n\n Examples\n ========\n\n >>> from sympy import Symbol, Basic, Expr, S\n >>> from sympy.printing.dot import styleof\n >>> styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),\n ... (Expr, {'color': 'black'})]\n\n >>> styleof(Basic(S(1)), styles)\n {'color': 'blue', 'shape': 'ellipse'}\n\n >>> x = Symbol('x')\n >>> styleof(x + 1, styles) # this is an Expr\n {'color': 'black', 'shape': 'ellipse'}\n ",
"language": "en",
"n_whitespaces": 106,
"n_words": 57,
"vocab_size": 41
} | def styleof(expr, styles=default_styles):
style = {}
for typ, sty in styles:
if isinstance(expr, typ):
style.update(sty)
return style
|
|
14,760 | 68,324 | 21 | erpnext/support/report/first_response_time_for_issues/first_response_time_for_issues.py | 36 | 10 | def execute(filters=None):
columns = [
{"fieldname": "creation_date | fix: bulk fix (~330) missing translations | execute | a896895a9e76a68ab055ce7871bb9d181d3fac15 | erpnext | first_response_time_for_issues.py | 12 | 25 | https://github.com/frappe/erpnext.git | 1 | 79 | 0 | 31 | 142 | Python | {
"docstring": "\n\t\tSELECT\n\t\t\tdate(creation) as creation_date,\n\t\t\tavg(first_response_time) as avg_response_time\n\t\tFROM tabIssue\n\t\tWHERE\n\t\t\tdate(creation) between %s and %s\n\t\t\tand first_response_time > 0\n\t\tGROUP BY creation_date\n\t\tORDER BY creation_date desc\n\t",
"language": "en",
"n_whitespaces": 17,
"n_words": 26,
"vocab_size": 20
} | def execute(filters=None):
columns = [
{"fieldname": "creation_date", "label": _("Date"), "fieldtype": "Date", "width": 300},
{
"fieldname": "first_response_time",
"fieldtype": "Duration",
"label": _("First Response Time"),
"width": 300,
},
]
data = frappe.db.sql(
,
(filters.from_date, filters.to_date),
)
return columns, data
|
|
2,931 | 19,295 | 278 | PathPlanning/RRTStar/rrt_star.py | 74 | 22 | def choose_parent(self, new_node, near_inds):
if not near_inds:
return None
# search nearest cost in near_inds
costs = []
for i in near_inds:
near_node = self.node_list[i]
t_node = self.steer(near_node, new_node)
if t_node and self.check_collision(
t_node, self.obstacle_list, self.robot_radius):
| Add optional robot radius to RRT/RRTStar path planners (#655)
* Add optional robot radius to RRT/RRTStar path planners.
* update __init__ and check_collision to include radius
* during animation, if a robot radius is given then it is drawn
* Add test for robot radius
* Correct import error
* Correct missing robot_radius errors
* Address "expected 2 blank lines, found 1" error
* Address "line too long" errors
* Add missing argument description.
* Remove collision_check_with_xy and replace with check_collision
* Fix "missing whitespace after ','" error
* Update PathPlanning/ClosedLoopRRTStar/closed_loop_rrt_star_car.py
Co-authored-by: Atsushi Sakai <[email protected]>
Co-authored-by: Atsushi Sakai <[email protected]> | choose_parent | b53fdf75f66ccb63b5cfaadaa81253d43f01805a | PythonRobotics | rrt_star.py | 15 | 20 | https://github.com/AtsushiSakai/PythonRobotics.git | 6 | 138 | 0 | 53 | 224 | Python | {
"docstring": "\n Computes the cheapest point to new_node contained in the list\n near_inds and set such a node as the parent of new_node.\n Arguments:\n --------\n new_node, Node\n randomly generated node with a path from its neared point\n There are not coalitions between this node and th tree.\n near_inds: list\n Indices of indices of the nodes what are near to new_node\n\n Returns.\n ------\n Node, a copy of new_node\n ",
"language": "en",
"n_whitespaces": 233,
"n_words": 65,
"vocab_size": 48
} | def choose_parent(self, new_node, near_inds):
if not near_inds:
return None
# search nearest cost in near_inds
costs = []
for i in near_inds:
near_node = self.node_list[i]
t_node = self.steer(near_node, new_node)
if t_node and self.check_collision(
t_node, self.obstacle_list, self.robot_radius):
costs.append(self.calc_new_cost(near_node, new_node))
else:
costs.append(float("inf")) # the cost of collision node
min_cost = min(costs)
if min_cost == float("inf"):
print("There is no good path.(min_cost is inf)")
return None
min_ind = near_inds[costs.index(min_cost)]
new_node = self.steer(self.node_list[min_ind], new_node)
new_node.cost = min_cost
return new_node
|
|
30,064 | 133,631 | 295 | rllib/agents/a3c/tests/test_a3c.py | 54 | 23 | def test_a3c_compilation(self):
config = a3c.DEFAULT_CONFIG.copy()
config["num_workers"] = 2
config["num_envs_per_worker"] = 2
num_iterations = 1
# Test against all frameworks.
for _ in framework_iterator(config, with_eager_tracing=True):
for env in ["CartPole-v1", "Pendulum-v1", "PongDeterministic-v0"]:
print("env={}".format(env))
config["model"]["use_lstm"] = env == "CartPole-v1"
trainer = a3c.A3CTrainer(config=config, env=env)
for i in range(num_iterations):
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_a3c_compilation | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_a3c.py | 15 | 18 | https://github.com/ray-project/ray.git | 4 | 129 | 0 | 42 | 224 | Python | {
"docstring": "Test whether an A3CTrainer can be built with both frameworks.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_a3c_compilation(self):
config = a3c.DEFAULT_CONFIG.copy()
config["num_workers"] = 2
config["num_envs_per_worker"] = 2
num_iterations = 1
# Test against all frameworks.
for _ in framework_iterator(config, with_eager_tracing=True):
for env in ["CartPole-v1", "Pendulum-v1", "PongDeterministic-v0"]:
print("env={}".format(env))
config["model"]["use_lstm"] = env == "CartPole-v1"
trainer = a3c.A3CTrainer(config=config, env=env)
for i in range(num_iterations):
results = trainer.train()
check_train_results(results)
print(results)
check_compute_single_action(
trainer, include_state=config["model"]["use_lstm"]
)
trainer.stop()
|
|
3,863 | 21,475 | 372 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | 76 | 24 | def extract(self, member, path="", set_attrs=True):
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tar | Vendor in pip 22.1.2 | extract | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | tarfile.py | 19 | 24 | https://github.com/pypa/pipenv.git | 8 | 170 | 0 | 52 | 279 | Python | {
"docstring": "Extract a member from the archive to the current working directory,\n using its full name. Its file information is extracted as accurately\n as possible. `member' may be a filename or a TarInfo object. You can\n specify a different directory using `path'. File attributes (owner,\n mtime, mode) are set unless `set_attrs' is False.\n ",
"language": "en",
"n_whitespaces": 99,
"n_words": 52,
"vocab_size": 45
} | def extract(self, member, path="", set_attrs=True):
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
|
|
5,604 | 30,465 | 32 | tests/types/test_artist.py | 17 | 8 | def test_artist_from_string():
artist = Artist.from_search_term("artist:gorillaz")
assert artist.name == "Gorillaz"
assert artist.url == "http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ"
| Search album by string enhancement (#1663) | test_artist_from_string | 57ce5c09ee1ac101f79962e59bd44a0396dfb76c | spotify-downloader | test_artist.py | 9 | 5 | https://github.com/spotDL/spotify-downloader.git | 1 | 34 | 0 | 14 | 63 | Python | {
"docstring": "\n Test if Artist class can be initialized from string.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def test_artist_from_string():
artist = Artist.from_search_term("artist:gorillaz")
assert artist.name == "Gorillaz"
assert artist.url == "http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ"
assert len(artist.urls) > 1
|
|
35,134 | 151,776 | 382 | freqtrade/freqai/RL/BaseEnvironment.py | 117 | 37 | def reset(self):
# custom_info is used for episodic reports and tensorboard logging
self.custom_info["Invalid"] = 0
self.custom_info["Hold"] = 0
self.custom_info["Unknown"] = 0
self.custom_info["pnl_factor"] = 0
self.custom_info["duration_factor"] = 0
self.custom_info["reward_exit"] = 0
self.custom_info["reward_hold"] = 0
for action in self.actions:
self.custom_info[f"{action.name}"] = 0
self._done = False
if self.starting_point is True:
if self.rl_config.get('randomize_starting_position', False):
length_of_data = int(self | reorganize/generalize tensorboard callback | reset | 24766928baddfed919be1138a64d51cdbb0d3764 | freqtrade | BaseEnvironment.py | 14 | 31 | https://github.com/freqtrade/freqtrade.git | 4 | 259 | 0 | 73 | 427 | Python | {
"docstring": "\n Reset is called at the beginning of every episode\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def reset(self):
# custom_info is used for episodic reports and tensorboard logging
self.custom_info["Invalid"] = 0
self.custom_info["Hold"] = 0
self.custom_info["Unknown"] = 0
self.custom_info["pnl_factor"] = 0
self.custom_info["duration_factor"] = 0
self.custom_info["reward_exit"] = 0
self.custom_info["reward_hold"] = 0
for action in self.actions:
self.custom_info[f"{action.name}"] = 0
self._done = False
if self.starting_point is True:
if self.rl_config.get('randomize_starting_position', False):
length_of_data = int(self._end_tick / 4)
start_tick = random.randint(self.window_size + 1, length_of_data)
self._start_tick = start_tick
self._position_history = (self._start_tick * [None]) + [self._position]
else:
self._position_history = (self.window_size * [None]) + [self._position]
self._current_tick = self._start_tick
self._last_trade_tick = None
self._position = Positions.Neutral
self.total_reward = 0.
self._total_profit = 1. # unit
self.history = {}
self.trade_history = []
self.portfolio_log_returns = np.zeros(len(self.prices))
self._profits = [(self._start_tick, 1)]
self.close_trade_profit = []
self._total_unrealized_profit = 1
return self._get_observation()
|
|
13,841 | 65,288 | 22 | erpnext/accounts/report/non_billed_report.py | 44 | 22 | def get_ordered_to_be_billed_data(args):
doctype, party = args.get("doctype"), args.get("party")
child_tab = doctype + " Item"
precision = (
get_field_precision(
frappe.get_meta(child_tab).get_field("billed_amt"), currency=get_default_currency()
)
or 2
)
project_field = get_project_field(doctype, party)
return frappe.db.sql(
.format(
parent_tab="tab" + doctype,
child_tab="tab" + child_tab,
precision=precision,
party=party,
date_field=args.get("date"),
project_field=project_field,
order=args.get("order"),
order_by=args.get("order_by"),
)
)
| style: format code with black | get_ordered_to_be_billed_data | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | non_billed_report.py | 14 | 46 | https://github.com/frappe/erpnext.git | 2 | 125 | 0 | 35 | 208 | Python | {
"docstring": "\n\t\tSelect\n\t\t\t`{parent_tab}`.name, `{parent_tab}`.{date_field},\n\t\t\t`{parent_tab}`.{party}, `{parent_tab}`.{party}_name,\n\t\t\t`{child_tab}`.item_code,\n\t\t\t`{child_tab}`.base_amount,\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0)),\n\t\t\t(`{child_tab}`.base_amount -\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))),\n\t\t\t`{child_tab}`.item_name, `{child_tab}`.description,\n\t\t\t{project_field}, `{parent_tab}`.company\n\t\tfrom\n\t\t\t`{parent_tab}`, `{child_tab}`\n\t\twhere\n\t\t\t`{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1\n\t\t\tand `{parent_tab}`.status not in ('Closed', 'Completed')\n\t\t\tand `{child_tab}`.amount > 0\n\t\t\tand (`{child_tab}`.base_amount -\n\t\t\tround(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))) > 0\n\t\torder by\n\t\t\t`{parent_tab}`.{order} {order_by}\n\t\t",
"language": "en",
"n_whitespaces": 47,
"n_words": 70,
"vocab_size": 48
} | def get_ordered_to_be_billed_data(args):
doctype, party = args.get("doctype"), args.get("party")
child_tab = doctype + " Item"
precision = (
get_field_precision(
frappe.get_meta(child_tab).get_field("billed_amt"), currency=get_default_currency()
)
or 2
)
project_field = get_project_field(doctype, party)
return frappe.db.sql(
.format(
parent_tab="tab" + doctype,
child_tab="tab" + child_tab,
precision=precision,
party=party,
date_field=args.get("date"),
project_field=project_field,
order=args.get("order"),
order_by=args.get("order_by"),
)
)
|
|
32,811 | 142,825 | 73 | python/ray/tune/execution/ray_trial_executor.py | 19 | 6 | def get_staged_trial(self):
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_m | [tune/structure] Introduce execution package (#26015)
Execution-specific packages are moved to tune.execution.
Co-authored-by: Xiaowei Jiang <[email protected]> | get_staged_trial | 0959f44b6fc217a4f2766ed46a721eb79b067b2c | ray | ray_trial_executor.py | 10 | 5 | https://github.com/ray-project/ray.git | 3 | 27 | 0 | 17 | 46 | Python | {
"docstring": "Get a trial whose placement group was successfully staged.\n\n Can also return None if no trial is available.\n\n Returns:\n Trial object or None.\n\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 23,
"vocab_size": 22
} | def get_staged_trial(self):
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_manager.has_ready(trial):
return trial
return None
|
|
35,986 | 154,453 | 66 | modin/core/dataframe/algebra/default2pandas/resample.py | 12 | 10 | def register(cls, func, squeeze_self=False, **kwargs):
return super().regi | REFACTOR-#4942: remove call method in favor of register due to duplication (#4943)
Signed-off-by: Myachev <[email protected]> | register | a6f47c8e1c27d85fc09926bb35c2f1a65a6d3e79 | modin | resample.py | 9 | 6 | https://github.com/modin-project/modin.git | 1 | 40 | 0 | 12 | 61 | Python | {
"docstring": "\n Build function that do fallback to pandas and aggregate resampled data.\n\n Parameters\n ----------\n func : callable\n Aggregation function to execute under resampled frame.\n squeeze_self : bool, default: False\n Whether or not to squeeze frame before resampling.\n **kwargs : kwargs\n Additional arguments that will be passed to function builder.\n\n Returns\n -------\n callable\n Function that takes query compiler and does fallback to pandas to resample\n time-series data and apply aggregation on it.\n ",
"language": "en",
"n_whitespaces": 196,
"n_words": 70,
"vocab_size": 53
} | def register(cls, func, squeeze_self=False, **kwargs):
return super().register(
Resampler.build_resample(func, squeeze_self),
fn_name=func.__name__,
**kwargs
)
|
|
51,208 | 205,775 | 540 | django/db/models/query.py | 117 | 33 | def aggregate(self, *args, **kwargs):
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwar | Refs #33476 -- Reformatted code with Black. | aggregate | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | query.py | 16 | 30 | https://github.com/django/django.git | 10 | 191 | 0 | 88 | 306 | Python | {
"docstring": "\n Return a dictionary containing the calculations (aggregation)\n over the current queryset.\n\n If args is present the expression is passed as a kwarg using\n the Aggregate object's default alias.\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 28,
"vocab_size": 23
} | def aggregate(self, *args, **kwargs):
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwargs.values()), method_name="aggregate"
)
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
annotation = query.annotations[alias]
if not annotation.contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
for expr in annotation.get_source_expressions():
if (
expr.contains_aggregate
and isinstance(expr, Ref)
and expr.refs in kwargs
):
name = expr.refs
raise exceptions.FieldError(
"Cannot compute %s('%s'): '%s' is an aggregate"
% (annotation.name, name, name)
)
return query.get_aggregation(self.db, kwargs)
|
|
52,717 | 209,525 | 137 | scapy/contrib/http2.py | 51 | 7 | def _detect_bytelen_from_str(s):
# type: (str) -> int
assert len(s) >= 2
tmp_len = len(s)
i = 1
while orb(s[i]) & 0x80 > 0:
i += 1
assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished A | E275 - Missing whitespace after keyword (#3711)
Co-authored-by: Alexander Aring <[email protected]>
Co-authored-by: Anmol Sarma <[email protected]>
Co-authored-by: antoine.torre <[email protected]>
Co-authored-by: Antoine Vacher <[email protected]>
Co-authored-by: Arnaud Ebalard <[email protected]>
Co-authored-by: atlowl <[email protected]>
Co-authored-by: Brian Bienvenu <[email protected]>
Co-authored-by: Chris Packham <[email protected]>
Co-authored-by: CQ <[email protected]>
Co-authored-by: Daniel Collins <[email protected]>
Co-authored-by: Federico Maggi <[email protected]>
Co-authored-by: Florian Maury <[email protected]>
Co-authored-by: _Frky <[email protected]>
Co-authored-by: g-mahieux <[email protected]>
Co-authored-by: gpotter2 <[email protected]>
Co-authored-by: Guillaume Valadon <[email protected]>
Co-authored-by: Hao Zheng <[email protected]>
Co-authored-by: Haresh Khandelwal <[email protected]>
Co-authored-by: Harri Hämäläinen <[email protected]>
Co-authored-by: hecke <[email protected]>
Co-authored-by: Jan Romann <[email protected]>
Co-authored-by: Jan Sebechlebsky <[email protected]>
Co-authored-by: jdiog0 <[email protected]>
Co-authored-by: jockque <[email protected]>
Co-authored-by: Julien Bedel <[email protected]>
Co-authored-by: Keith Scott <[email protected]>
Co-authored-by: Kfir Gollan <[email protected]>
Co-authored-by: Lars Munch <[email protected]>
Co-authored-by: ldp77 <[email protected]>
Co-authored-by: Leonard Crestez <[email protected]>
Co-authored-by: Marcel Patzlaff <[email protected]>
Co-authored-by: Martijn Thé <[email protected]>
Co-authored-by: Martine Lenders <[email protected]>
Co-authored-by: Michael Farrell <[email protected]>
Co-authored-by: Michał Mirosław <[email protected]>
Co-authored-by: mkaliszan <[email protected]>
Co-authored-by: mtury <[email protected]>
Co-authored-by: Neale Ranns <[email protected]>
Co-authored-by: Octavian Toader <[email protected]>
Co-authored-by: Peter Eisenlohr <[email protected]>
Co-authored-by: Phil <[email protected]>
Co-authored-by: Pierre Lalet <[email protected]>
Co-authored-by: Pierre Lorinquer <[email protected]>
Co-authored-by: piersoh <[email protected]>
Co-authored-by: plorinquer <[email protected]>
Co-authored-by: pvinci <[email protected]>
Co-authored-by: Rahul Jadhav <[email protected]>
Co-authored-by: Robin Jarry <[email protected]>
Co-authored-by: romain-perez <[email protected]>
Co-authored-by: rperez <rperez@debian>
Co-authored-by: Sabrina Dubroca <[email protected]>
Co-authored-by: Sebastian Baar <[email protected]>
Co-authored-by: sebastien mainand <[email protected]>
Co-authored-by: smehner1 <[email protected]>
Co-authored-by: speakinghedge <[email protected]>
Co-authored-by: Steven Van Acker <[email protected]>
Co-authored-by: Thomas Faivre <[email protected]>
Co-authored-by: Tran Tien Dat <[email protected]>
Co-authored-by: Wael Mahlous <[email protected]>
Co-authored-by: waeva <[email protected]>
Co-authored-by: Alexander Aring <[email protected]>
Co-authored-by: Anmol Sarma <[email protected]>
Co-authored-by: antoine.torre <[email protected]>
Co-authored-by: Antoine Vacher <[email protected]>
Co-authored-by: Arnaud Ebalard <[email protected]>
Co-authored-by: atlowl <[email protected]>
Co-authored-by: Brian Bienvenu <[email protected]>
Co-authored-by: Chris Packham <[email protected]>
Co-authored-by: CQ <[email protected]>
Co-authored-by: Daniel Collins <[email protected]>
Co-authored-by: Federico Maggi <[email protected]>
Co-authored-by: Florian Maury <[email protected]>
Co-authored-by: _Frky <[email protected]>
Co-authored-by: g-mahieux <[email protected]>
Co-authored-by: gpotter2 <[email protected]>
Co-authored-by: Guillaume Valadon <[email protected]>
Co-authored-by: Hao Zheng <[email protected]>
Co-authored-by: Haresh Khandelwal <[email protected]>
Co-authored-by: Harri Hämäläinen <[email protected]>
Co-authored-by: hecke <[email protected]>
Co-authored-by: Jan Romann <[email protected]>
Co-authored-by: Jan Sebechlebsky <[email protected]>
Co-authored-by: jdiog0 <[email protected]>
Co-authored-by: jockque <[email protected]>
Co-authored-by: Julien Bedel <[email protected]>
Co-authored-by: Keith Scott <[email protected]>
Co-authored-by: Kfir Gollan <[email protected]>
Co-authored-by: Lars Munch <[email protected]>
Co-authored-by: ldp77 <[email protected]>
Co-authored-by: Leonard Crestez <[email protected]>
Co-authored-by: Marcel Patzlaff <[email protected]>
Co-authored-by: Martijn Thé <[email protected]>
Co-authored-by: Martine Lenders <[email protected]>
Co-authored-by: Michael Farrell <[email protected]>
Co-authored-by: Michał Mirosław <[email protected]>
Co-authored-by: mkaliszan <[email protected]>
Co-authored-by: mtury <[email protected]>
Co-authored-by: Neale Ranns <[email protected]>
Co-authored-by: Octavian Toader <[email protected]>
Co-authored-by: Peter Eisenlohr <[email protected]>
Co-authored-by: Phil <[email protected]>
Co-authored-by: Pierre Lalet <[email protected]>
Co-authored-by: Pierre Lorinquer <[email protected]>
Co-authored-by: piersoh <[email protected]>
Co-authored-by: pvinci <[email protected]>
Co-authored-by: Rahul Jadhav <[email protected]>
Co-authored-by: Robin Jarry <[email protected]>
Co-authored-by: romain-perez <[email protected]>
Co-authored-by: rperez <rperez@debian>
Co-authored-by: Sabrina Dubroca <[email protected]>
Co-authored-by: Sebastian Baar <[email protected]>
Co-authored-by: sebastien mainand <[email protected]>
Co-authored-by: smehner1 <[email protected]>
Co-authored-by: Steven Van Acker <[email protected]>
Co-authored-by: Thomas Faivre <[email protected]>
Co-authored-by: Tran Tien Dat <[email protected]>
Co-authored-by: Wael Mahlous <[email protected]>
Co-authored-by: waeva <[email protected]> | _detect_bytelen_from_str | 08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf | scapy | http2.py | 10 | 10 | https://github.com/secdev/scapy.git | 2 | 55 | 0 | 37 | 93 | Python | {
"docstring": " _detect_bytelen_from_str returns the length of the machine\n representation of an AbstractUVarIntField starting at the beginning\n of s and which is assumed to expand over multiple bytes\n (value > _max_prefix_value).\n\n :param str s: the string to parse. It is assumed that it is a multibyte int. # noqa: E501\n :return: The bytelength of the AbstractUVarIntField.\n :raises: AssertionError\n ",
"language": "en",
"n_whitespaces": 113,
"n_words": 56,
"vocab_size": 45
} | def _detect_bytelen_from_str(s):
# type: (str) -> int
assert len(s) >= 2
tmp_len = len(s)
i = 1
while orb(s[i]) & 0x80 > 0:
i += 1
assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished AbstractUVarIntField detected' # noqa: E501
ret = i + 1
assert ret >= 0
return ret
|
|
14,597 | 67,696 | 4 | erpnext/stock/doctype/purchase_receipt/test_purchase_receipt.py | 10 | 7 | def get_gl_entries(voucher_type, voucher_no):
return frappe.db.sql(
,
(voucher_type, voucher_no) | style: format code with black | get_gl_entries | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | test_purchase_receipt.py | 8 | 8 | https://github.com/frappe/erpnext.git | 1 | 27 | 0 | 10 | 40 | Python | {
"docstring": "select account, debit, credit, cost_center, is_cancelled\n\t\tfrom `tabGL Entry` where voucher_type=%s and voucher_no=%s\n\t\torder by account desc",
"language": "en",
"n_whitespaces": 14,
"n_words": 17,
"vocab_size": 17
} | def get_gl_entries(voucher_type, voucher_no):
return frappe.db.sql(
,
(voucher_type, voucher_no),
as_dict=1,
)
|
|
53,681 | 213,618 | 17 | ivy/core/random.py | 11 | 7 | def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None):
return _cur_framework(f=f).random_normal(mean, | renamed dev_str arg to dev for all methods. | random_normal | d743336b1f3654cd0315f380f43eed4116997c1d | ivy | random.py | 10 | 2 | https://github.com/unifyai/ivy.git | 1 | 46 | 0 | 11 | 61 | Python | {
"docstring": "\n Draws samples from a normal distribution.\n\n :param mean: The mean of the normal distribution to sample from. Default is 0.\n :type mean: float\n :param std: The standard deviation of the normal distribution to sample from. Default is 1.\n :type std: float\n :param shape: Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn.\n If size is None (default), a single value is returned.\n :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.\n :type dev: str\n :param f: Machine learning framework. Inferred from inputs if None.\n :type f: ml_framework, optional\n :return: Drawn samples from the parameterized uniform distribution.\n ",
"language": "en",
"n_whitespaces": 167,
"n_words": 111,
"vocab_size": 74
} | def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None):
return _cur_framework(f=f).random_normal(mean, std, shape, dev)
|
|
23,019 | 108,020 | 29 | lib/matplotlib/texmanager.py | 8 | 5 | def get_font_preamble(cls):
font_preamble, command = cls. | Move towards making texmanager stateless.
Previously, TexManager needed to call get_font_config at a specific
place in the middle of processing to update some internal attributes
before proceeding with TeX source generation. Instead, move towards
making TexManager stateless (except for caching), i.e. the user facing
API should be thought of as a bunch of independently callable functions
`make_tex()`, `make_dvi()`, etc. (they will probably stay as methods on
a "empty" TexManager object for a long time for backcompat, in fact). | get_font_preamble | 13147992b317c29c6e832ca7f6d05bf48aeb0718 | matplotlib | texmanager.py | 8 | 3 | https://github.com/matplotlib/matplotlib.git | 1 | 17 | 0 | 8 | 31 | Python | {
"docstring": "\n Return a string containing font configuration for the tex preamble.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def get_font_preamble(cls):
font_preamble, command = cls._get_font_preamble_and_command()
return font_preamble
|
|
30,836 | 136,154 | 755 | rllib/utils/exploration/tests/test_explorations.py | 147 | 30 | def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None):
# Test all frameworks.
for _ in framework_iterator(config):
print(f"Algorithm={config.algo_class}")
# Test for both the default Agent's exploration AND the `Random`
# exploration class.
for exploration in [None, "Random"]:
local_config = config.copy()
if exploration == "Random":
local_config.exploration(exploration_config={"type": "Random"})
print("exploration={}".format(exploration or "default"))
algo = local_config.build()
# Make sure all actions drawn are the same, given same
# observations.
actions = []
for _ in range(25):
actions.append(
algo.compute_single_action(
observation=dummy_obs,
explore=Fal | [RLlib] AlgorithmConfig: Replace more occurrences of old config dicts; Make all Algorithms use the non-dict lookup for config properties. (#30096) | do_test_explorations | e715a8b7616f9f24839531fcefc1420f79ab13ec | ray | test_explorations.py | 18 | 36 | https://github.com/ray-project/ray.git | 10 | 231 | 0 | 85 | 357 | Python | {
"docstring": "Calls an Agent's `compute_actions` with different `explore` options.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None):
# Test all frameworks.
for _ in framework_iterator(config):
print(f"Algorithm={config.algo_class}")
# Test for both the default Agent's exploration AND the `Random`
# exploration class.
for exploration in [None, "Random"]:
local_config = config.copy()
if exploration == "Random":
local_config.exploration(exploration_config={"type": "Random"})
print("exploration={}".format(exploration or "default"))
algo = local_config.build()
# Make sure all actions drawn are the same, given same
# observations.
actions = []
for _ in range(25):
actions.append(
algo.compute_single_action(
observation=dummy_obs,
explore=False,
prev_action=prev_a,
prev_reward=1.0 if prev_a is not None else None,
)
)
check(actions[-1], actions[0])
# Make sure actions drawn are different
# (around some mean value), given constant observations.
actions = []
for _ in range(500):
actions.append(
algo.compute_single_action(
observation=dummy_obs,
explore=True,
prev_action=prev_a,
prev_reward=1.0 if prev_a is not None else None,
)
)
check(
np.mean(actions),
expected_mean_action if expected_mean_action is not None else 0.5,
atol=0.4,
)
# Check that the stddev is not 0.0 (values differ).
check(np.std(actions), 0.0, false=True)
|
|
11,227 | 55,138 | 21 | src/prefect/cli/base.py | 9 | 9 | def exit_with_success(message, **kwargs):
kwargs.setdefault("style", "green")
app.console.prin | Update `set` command; allow CLI `console` object to be patched | exit_with_success | c0cb1fee460c1bded9e3eb741ad7979402844bf8 | prefect | base.py | 8 | 4 | https://github.com/PrefectHQ/prefect.git | 1 | 35 | 0 | 9 | 61 | Python | {
"docstring": "\n Utility to print a stylized success message and exit with a zero code\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 12
} | def exit_with_success(message, **kwargs):
kwargs.setdefault("style", "green")
app.console.print(message, **kwargs)
raise typer.Exit(0)
|
|
36,153 | 154,845 | 91 | modin/_version.py | 61 | 7 | def get_keywords() -> Dict[str, str]:
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = " | REFACTOR-#5012: Add mypy checks for singleton files in base modin directory (#5013)
Signed-off-by: Jonathan Shi <[email protected]> | get_keywords | 446148dbf9b66debd0a0dbf9ce778253380d5921 | modin | _version.py | 9 | 7 | https://github.com/modin-project/modin.git | 1 | 38 | 0 | 51 | 76 | Python | {
"docstring": "Get the keywords needed to look up the version information.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def get_keywords() -> Dict[str, str]:
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
|
|
36,416 | 155,520 | 798 | dask/array/slicing.py | 299 | 71 | def take(outname, inname, chunks, index, itemsize, axis=0):
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order index is generating %d "
"times more chunks" % factor,
PerformanceWarning,
stacklevel=6,
)
if not is_arraylike(index):
index = np.asarray(index)
# Check for chunks from the plan that would violate the user's
# configured chunk size.
nbytes = utils.parse_bytes(config.get("array.chunk-size"))
other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]
other_numel = np.prod([sum(x) for x in other_chunks])
if math.isnan(other_numel):
warnsize = maxsize = math.inf
else:
maxsize = math.ceil(nbytes / (other_numel * itemsize))
warnsize = maxsize * 5
split = config.get("array.slicing.split-large-chunks", None)
# Warn only when the default is not specified.
warned = split is not None
for _, index_list in plan:
if not warned and len(index_list) > warnsize:
msg = (
"Slicing is producing a large chunk. To accept the large\n"
"chunk and silence this warning, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"
" ... array[indexer]\n\n"
"To avoid creating the large chunks, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"
" ... array[indexer]"
)
warnings.warn(msg, PerformanceWarning, stacklevel=6)
warned = True
where_index = []
index_lists = []
for where_idx, index_list in plan:
index_length = len(index_list)
if split and index_length > maxsize:
index_sublist = np.array_split(
index_list, math.ceil(index_length / maxsize)
)
index_lists.extend(index_sublist)
where_index.extend([where_idx] * len(index_sublist))
else:
if not is_arraylike(index_list):
index_list = np.array(index_list)
index_lists.append(index_list)
where_index.append(where_idx)
dims = [range(len(bd)) for bd in chunks]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon] * len(bd) for bd in chunks]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
chunks2 = list(chunks)
chunks2[axis] = tuple(map(len, index_lists))
dsk = dict(zip(keys, values))
return tuple(chunks2), dsk
| DOC: normalize whitespace in doctests in slicing.py (#8512) | take | fa8dfede71677a2301d4cd602cf4b27af41cbc4f | dask | slicing.py | 15 | 66 | https://github.com/dask/dask.git | 17 | 509 | 0 | 181 | 824 | Python | {
"docstring": "Index array with an iterable of index\n\n Handles a single index by a single list\n\n Mimics ``np.take``\n\n >>> from pprint import pprint\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)\n >>> chunks\n ((2, 1, 1),)\n >>> pprint(dsk) # doctest: +ELLIPSIS\n {('y', 0): (<function getitem at ...>, ('x', 0), (array([5, 1]),)),\n ('y', 1): (<function getitem at ...>, ('x', 2), (array([7]),)),\n ('y', 2): (<function getitem at ...>, ('x', 0), (array([3]),))}\n\n When list is sorted we retain original block structure\n\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)\n >>> chunks\n ((3, 1),)\n >>> pprint(dsk) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n {('y', 0): (<function getitem at ...>,\n ('x', 0),\n (array([1, 3, 5]),)),\n ('y', 1): (<function getitem at ...>, ('x', 2), (array([7]),))}\n\n When any indexed blocks would otherwise grow larger than\n dask.config.array.chunk-size, we might split them,\n depending on the value of ``dask.config.slicing.split-large-chunks``.\n\n >>> import dask\n >>> with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n ... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],\n ... [0] + [1] * 6 + [2], axis=0, itemsize=8)\n >>> chunks\n ((1, 3, 3, 1), (1000, 1000), (1000, 1000))\n ",
"language": "en",
"n_whitespaces": 339,
"n_words": 191,
"vocab_size": 108
} | def take(outname, inname, chunks, index, itemsize, axis=0):
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order index is generating %d "
"times more chunks" % factor,
PerformanceWarning,
stacklevel=6,
)
if not is_arraylike(index):
index = np.asarray(index)
# Check for chunks from the plan that would violate the user's
# configured chunk size.
nbytes = utils.parse_bytes(config.get("array.chunk-size"))
other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]
other_numel = np.prod([sum(x) for x in other_chunks])
if math.isnan(other_numel):
warnsize = maxsize = math.inf
else:
maxsize = math.ceil(nbytes / (other_numel * itemsize))
warnsize = maxsize * 5
split = config.get("array.slicing.split-large-chunks", None)
# Warn only when the default is not specified.
warned = split is not None
for _, index_list in plan:
if not warned and len(index_list) > warnsize:
msg = (
"Slicing is producing a large chunk. To accept the large\n"
"chunk and silence this warning, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"
" ... array[indexer]\n\n"
"To avoid creating the large chunks, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"
" ... array[indexer]"
)
warnings.warn(msg, PerformanceWarning, stacklevel=6)
warned = True
where_index = []
index_lists = []
for where_idx, index_list in plan:
index_length = len(index_list)
if split and index_length > maxsize:
index_sublist = np.array_split(
index_list, math.ceil(index_length / maxsize)
)
index_lists.extend(index_sublist)
where_index.extend([where_idx] * len(index_sublist))
else:
if not is_arraylike(index_list):
index_list = np.array(index_list)
index_lists.append(index_list)
where_index.append(where_idx)
dims = [range(len(bd)) for bd in chunks]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon] * len(bd) for bd in chunks]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
chunks2 = list(chunks)
chunks2[axis] = tuple(map(len, index_lists))
dsk = dict(zip(keys, values))
return tuple(chunks2), dsk
|
|
50,070 | 202,325 | 61 | tests/contenttypes_tests/test_models.py | 11 | 9 | def test_multidb(self):
ContentType.objects.clear_cache()
with self.assertNumQueries(0, using="default"), self.assertNumQueries(
1, using="other"
):
ContentType.objects.get_for_model(Author)
| Refs #33476 -- Reformatted code with Black. | test_multidb | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test_models.py | 11 | 6 | https://github.com/django/django.git | 1 | 44 | 0 | 11 | 78 | Python | {
"docstring": "\n When using multiple databases, ContentType.objects.get_for_model() uses\n db_for_read().\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 7,
"vocab_size": 7
} | def test_multidb(self):
ContentType.objects.clear_cache()
with self.assertNumQueries(0, using="default"), self.assertNumQueries(
1, using="other"
):
ContentType.objects.get_for_model(Author)
|
|
84,312 | 282,822 | 151 | gamestonk_terminal/econometrics/econometrics_model.py | 103 | 31 | def get_engle_granger_two_step_cointegration_test(y, x):
warnings.simplefilter(action="ignore", category=FutureWarning)
long_run_ols = sm.OLS(y, sm.add_constant(x))
warnings.simplefilter(action="default", category=FutureWarning)
long_run_ols_fit = long_run_ols.fit()
c, gamma = long_run_ols_fit.params
z = long_run_ols_fit.resid
short_run_ols = sm.OLS(y.diff().iloc[1:], (z.shift().iloc[1:]))
short_run_ols_fit = short_run_ol | Econometrics notebooks API (#1462)
* Add initial implementations of the API wrappers
* Fix typos in docstrings
* Fix typos an markdown linting errors in docs
* Ditch using insecure eval in favor of secure getattr
* Add GST notebooks API documentation
* Add notebook screenshot to the GST API docs | get_engle_granger_two_step_cointegration_test | 1b914d45e8575827c05a432d56846f5c5f2559c4 | OpenBBTerminal | econometrics_model.py | 13 | 12 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 147 | 0 | 88 | 230 | Python | {
"docstring": "Estimates long-run and short-run cointegration relationship for series y and x and apply\n the two-step Engle & Granger test for cointegration.\n\n Uses a 2-step process to first estimate coefficients for the long-run relationship\n y_t = c + gamma * x_t + z_t\n\n and then the short-term relationship,\n y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t,\n\n with z the found residuals of the first equation.\n\n Then tests cointegration by Dickey-Fuller phi=1 vs phi < 1 in\n z_t = phi * z_(t-1) + eta_t\n\n If this implies phi < 1, the z series is stationary is concluded to be\n stationary, and thus the series y and x are concluded to be cointegrated.\n\n Parameters\n ----------\n y : pd.Series\n The first time series of the pair to analyse.\n\n x : pd.Series\n The second time series of the pair to analyse.\n\n Returns\n -------\n c : float\n The constant term in the long-run relationship y_t = c + gamma * x_t + z_t. This\n describes the static shift of y with respect to gamma * x.\n\n gamma : float\n The gamma term in the long-run relationship y_t = c + gamma * x_t + z_t. This\n describes the ratio between the const-shifted y and x.\n\n alpha : float\n The alpha term in the short-run relationship y_t - y_(t-1) = alpha * z_(t-1) + epsilon. This\n gives an indication of the strength of the error correction toward the long-run mean.\n\n z : pd.Series\n Series of residuals z_t from the long-run relationship y_t = c + gamma * x_t + z_t, representing\n the value of the error correction term.\n\n dfstat : float\n The Dickey Fuller test-statistic for phi = 1 vs phi < 1 in the second equation. A more\n negative value implies the existence of stronger cointegration.\n\n pvalue : float\n The p-value corresponding to the Dickey Fuller test-statistic. A lower value implies\n stronger rejection of no-cointegration, thus stronger evidence of cointegration.\n\n ",
"language": "en",
"n_whitespaces": 494,
"n_words": 315,
"vocab_size": 127
} | def get_engle_granger_two_step_cointegration_test(y, x):
warnings.simplefilter(action="ignore", category=FutureWarning)
long_run_ols = sm.OLS(y, sm.add_constant(x))
warnings.simplefilter(action="default", category=FutureWarning)
long_run_ols_fit = long_run_ols.fit()
c, gamma = long_run_ols_fit.params
z = long_run_ols_fit.resid
short_run_ols = sm.OLS(y.diff().iloc[1:], (z.shift().iloc[1:]))
short_run_ols_fit = short_run_ols.fit()
alpha = short_run_ols_fit.params[0]
# NOTE: The p-value returned by the adfuller function assumes we do not estimate z
# first, but test stationarity of an unestimated series directly. This assumption
# should have limited effect for high N, however. Critical values taking this into
# account more accurately are provided in e.g. McKinnon (1990) and Engle & Yoo (1987).
adfstat, pvalue, _, _, _ = adfuller(z, maxlag=1, autolag=None)
return c, gamma, alpha, z, adfstat, pvalue
|
|
51,823 | 206,982 | 44 | tests/admin_changelist/tests.py | 12 | 11 | def test_deterministic_order_for_unordered_model(self):
superuser = self._create_superuser("superuser")
for counter in range(1, 51):
| Refs #33476 -- Reformatted code with Black. | test_deterministic_order_for_unordered_model | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 10 | 18 | https://github.com/django/django.git | 2 | 118 | 0 | 12 | 63 | Python | {
"docstring": "\n The primary key is used in the ordering of the changelist's results to\n guarantee a deterministic order, even when the model doesn't have any\n default ordering defined (#17198).\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 28,
"vocab_size": 25
} | def test_deterministic_order_for_unordered_model(self):
superuser = self._create_superuser("superuser")
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
|
Subsets and Splits