content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---|
import os
import logging
import pandas as pd
def dataframe2naf(
df_meta: pd.DataFrame,
overwrite_existing_naf: bool=False,
rerun_files_with_naf_errors: bool=False,
engine: str=None,
naf_version: str=None,
dtd_validation: bool=False,
    params: dict = None,
nlp=None,
) -> pd.DataFrame:
"""Batch processor for NAF
Args:
df_meta: the dataframe containing the meta data for the NAF files.
overwrite_existing_naf: if True then existing NAF files are overwritten (default = False)
rerun_files_with_naf_errors: if True then documents that produced NAF errors are run again (default = False)
engine: name of the NLP processor to be used (default = None)
naf_version: NAF version to be used
dtd_validation: perform validation of each NAF file (default = False)
        params: additional parameters for NAF conversion
        nlp: optional preloaded NLP pipeline to reuse across documents (default = None)
Returns:
pd.DataFrame: the dataframe with (updated) metadata
"""
if "naf:status" not in df_meta.columns:
df_meta["naf:status"] = ""
for row in df_meta.index:
if "dc:language" in df_meta.columns:
dc_language = df_meta.loc[row, "dc:language"].lower()
else:
dc_language = None
df_meta.loc[
row, "naf:status"] = "ERROR, no dc:language in DataFrame"
if "dc:source" in df_meta.columns:
dc_source = df_meta.loc[row, "dc:source"]
else:
dc_source = None
df_meta.loc[row, "naf:status"] = "ERROR, no dc:source in DataFrame"
if "naf:source" in df_meta.columns and not pd.isna(df_meta.loc[row, "naf:source"]):
output = df_meta.loc[row, "naf:source"]
        else:
            output = None
            if dc_source is not None:
                output = os.path.splitext(dc_source)[0] + ".naf.xml"
if dc_source and dc_language and output:
# logging per processed file
log_file: str = os.path.splitext(dc_source)[0] + ".log"
logging.basicConfig(filename=log_file,
level=logging.WARNING, filemode="w")
if os.path.exists(output) and not overwrite_existing_naf:
# the NAF file exists and we should not overwrite existing naf
# files -> skip
df_meta.loc[row, "naf:status"] = "OK"
df_meta.loc[row, "naf:source"] = output
continue
elif (
"error" in df_meta.loc[row, "naf:status"].lower()
and not rerun_files_with_naf_errors
):
# the status is ERROR and we should not rerun files with errors
# -> skip
continue
else:
                # merge the dataframe columns for this row into any user-supplied params
                params = dict(params or {})
                params.update({
                    col: df_meta.loc[row, col]
                    for col in df_meta.columns
                    if col not in ["naf:source", "naf:status"]
                })
try:
doc = parse2naf.generate_naf(
input=dc_source,
engine=engine,
language=dc_language,
naf_version=naf_version,
dtd_validation=dtd_validation,
params=params,
nlp=nlp,
)
if not os.path.exists(output):
doc.write(output)
else:
if overwrite_existing_naf:
doc.write(output)
df_meta.loc[row, "naf:status"] = "OK"
df_meta.loc[row, "naf:source"] = output
            except Exception:
df_meta.loc[row, "naf:status"] = "ERROR, generate_naf"
return df_meta
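
# Illustrative usage sketch (not part of the original module): the "dc:source" and
# "dc:language" columns are the ones the function reads, while the engine and
# naf_version values below are hypothetical, and the call assumes the surrounding
# NAF toolkit (providing parse2naf) is importable.
df_example = pd.DataFrame(
    {
        "dc:source": ["reports/annual_report_2020.pdf"],
        "dc:language": ["en"],
    }
)
df_example = dataframe2naf(df_example, engine="stanza", naf_version="v3.1")
print(df_example)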
|
6bc26b849a456421457868bdd83fd2362ca1e757
| 18,900 |
import json
import re
from datetime import datetime, timedelta
import random
def dev_view(request, slug=""):
"""View for homepage or individual developer."""
if slug == "":
dev_name = list(Dev.objects.all().values_list('dev_name', flat=True))
dev_img_address = list(Dev.objects.values_list('dev_image_address', flat=True))
dev_slug = list(Dev.objects.values_list('dev_slug', flat=True))
dev_order = list(Dev.objects.values_list('dev_order_pop', flat=True))
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_query').values())) == 1:
g_query_datetime_init = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values_list()[0])
else:
task_id_query = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values())[1]['task_id']
g_query_datetime_init = list(TaskResult.objects.filter(
task_id=task_id_query
).values_list()[0])
g_query_datetime = g_query_datetime_init[11]
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
g_dev_datetime_init = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values_list()[0])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
g_dev_datetime_init = list(TaskResult.objects.filter(
task_id=task_id_dev
).values_list()[0])
g_dev_datetime = g_dev_datetime_init[11]
if g_dev_datetime > g_query_datetime:
g_datetime = g_dev_datetime
        else:
g_datetime = g_query_datetime
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_query').values())) == 1:
g_query = json.loads(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values()[0]['result'])
else:
task_id_query = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values())[1]['task_id']
g_query = json.loads(TaskResult.objects.filter(
task_id=task_id_query
).values()[0]['result'])
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
g_dev = json.loads(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values()[0]['result'])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
g_dev = json.loads(TaskResult.objects.filter(
task_id=task_id_dev
).values()[0]['result'])
# 2-day date filter for homepage 'Latest News'
def date_criteria(g_inp):
dates = [re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', g_inp[i][8]).group(0) for i in range(len(g_inp))]
dates_datetime = [datetime.strptime(i, '%Y-%m-%d') for i in dates]
today = datetime.today()
            time_criteria = datetime(year=today.year, month=today.month, day=today.day) - timedelta(days=2)
return [g_inp[i] for i in range(len(g_inp)) if dates_datetime[i] >= time_criteria]
entries_for_carousel_init = [date_criteria(g_dev) + date_criteria(g_query)][0]
entries_for_carousel = [i for i in entries_for_carousel_init if i[9] != 'none']
entries_for_latest_news_init = entries_for_carousel
entries_for_latest_news_init = sorted(entries_for_latest_news_init, key=lambda sort: sort[8], reverse=True)
link_latest_news = [i[1] for i in entries_for_latest_news_init]
link_count = [link_latest_news.count(link_latest_news[i]) for i in
range(len(link_latest_news))]
link_zip = list(zip(link_latest_news, link_count))
link_unique = [link_zip[i][0] if link_zip[i][1] == 1 else 'none' for i in
range(len(link_zip))]
nonunique_indices_link = [i for i, x in enumerate(link_unique) if x == "none"]
nonunique_check_link = []
nonunique_entries_nonrepeat_link = []
for i in nonunique_indices_link:
nonunique_check_link.append(link_latest_news[i])
count_inst = nonunique_check_link.count(link_latest_news[i])
if count_inst == 1:
nonunique_entries_nonrepeat_link.append(entries_for_latest_news_init[i])
google_search_results_unique = []
for i in range(len(link_unique)):
try:
if link_unique[i] != 'none':
google_search_results_unique.append(entries_for_latest_news_init[i])
except IndexError:
pass
google_search_results_combined = google_search_results_unique + nonunique_entries_nonrepeat_link
page = request.GET.get('page', 1)
paginator2 = Paginator(google_search_results_combined, 2000)
try:
entries_for_latest_news = paginator2.page(page)
except PageNotAnInteger:
entries_for_latest_news = paginator2.page(1)
except EmptyPage:
entries_for_latest_news = paginator2.page(paginator2.num_pages)
random.shuffle(entries_for_carousel)
if request.user.is_authenticated:
if request.method == "POST":
p_form = FavoriteGamesUpdateForm(data=request.POST)
user_fav = list(FavoriteGames.objects.all().values_list())
user_slug_list = [user_fav[i][2] for i in range(len(user_fav))
if user_fav[i][1] == request.user.profile.id]
if request.POST["dev_user_str"] not in user_slug_list:
if p_form.is_valid():
form_instance = p_form.save(commit=False)
form_instance.profile = Profile.objects.get(user=request.user)
form_instance.dev_user_str = p_form.cleaned_data["dev_user_str"]
form_instance.save()
else:
FavoriteGames.objects.filter(
profile_id=request.user.profile.id
).filter(
dev_user_str=request.POST.get('dev_user_str')
).delete()
fav_game_check = list(FavoriteGames.objects.filter(profile_id=request.user.profile.id).values())
devs_in_favs = [fav_game_check[i]['dev_user_str'] for i in range(len(fav_game_check))]
dev_game_check_list = []
for j, i in enumerate(dev_slug):
if i in devs_in_favs:
dev_game_check_list.append('yes')
else:
dev_game_check_list.append('no')
else:
dev_game_check_list = ""
dev_list_name = sorted(list(zip_longest(dev_name, dev_img_address, dev_slug, dev_game_check_list, dev_order)),
key=lambda lowercase: lowercase[0].lower())
dev_list_pop = sorted(list(zip_longest(dev_name, dev_img_address, dev_slug, dev_game_check_list, dev_order)),
key=lambda dev_order_list: dev_order_list[4])
cache_key = "test_cache_key"
if cache.get(cache_key) is not None:
paginator_for_class_1 = Paginator(cache.get(cache_key), 48)
else:
cache.set(
cache_key,
dev_list_pop,
60 * 60 * 4,
)
context = {
'numbers': dev_list_pop,
'entries': entries_for_carousel,
'latest_news': entries_for_latest_news,
'g_query_datetime': g_query_datetime,
'g_dev_datetime': g_dev_datetime,
'g_datetime': g_datetime,
}
if request.method == "POST":
return redirect("/")
else:
return render(request, "homepage/dev_base.html", context)
else:
dev_query_results_init = TaskResult.objects.filter(task_name='homepage.tasks.rawg_fetch_dev')
dev_query_results = json.loads(dev_query_results_init.values()[0]['result'])
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
g_dev_datetime_init = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values_list()[0])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
g_dev_datetime_init = list(TaskResult.objects.filter(
task_id=task_id_dev
).values_list()[0])
g_dev_datetime = g_dev_datetime_init[11]
slug_index1 = [dev_query_results][0][0].index(slug)
dev_list = [dev_query_results[0][slug_index1]]
slugs_per_dev_list = dev_query_results[1][slug_index1]
names_per_dev_list = dev_query_results[2][slug_index1]
ratings_per_dev_list = dev_query_results[3][slug_index1]
background_img_per_dev_list = dev_query_results[4][slug_index1]
released_per_dev_list = dev_query_results[5][slug_index1]
full_clip_per_dev_list = dev_query_results[6][slug_index1]
ratings_count_per_dev_list = dev_query_results[7][slug_index1]
dev_game_data = sorted(list(zip_longest(dev_list, slugs_per_dev_list, names_per_dev_list,
ratings_per_dev_list, background_img_per_dev_list,
released_per_dev_list,
full_clip_per_dev_list, ratings_count_per_dev_list)),
key=lambda sort: sort[7], reverse=True)
dev_game_data2 = []
for i in range(len(dev_game_data)):
try:
if dev_game_data[i][4] is not None:
dev_game_data2.append(dev_game_data[i])
except IndexError:
pass
page = request.GET.get('page', 1)
paginator2 = Paginator(dev_game_data2, 2000)
try:
numbers = paginator2.page(page)
except PageNotAnInteger:
numbers = paginator2.page(1)
except EmptyPage:
numbers = paginator2.page(paginator2.num_pages)
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
google_query_results = json.loads(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values()[0]['result'])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
google_query_results = json.loads(TaskResult.objects.filter(
task_id=task_id_dev
).values()[0]['result'])
dev_name_list = list(Dev.objects.all().values_list('dev_name', flat=True))
dev_slug_list = list(Dev.objects.all().values_list('dev_slug', flat=True))
dev_img_list = list(Dev.objects.values_list('dev_image_address', flat=True))
dev_slug_index = dev_slug_list.index(slug)
dev_name_for_site = dev_name_list[dev_slug_index]
dev_img_for_site = dev_img_list[dev_slug_index]
google_search_results = [google_query_results[i] if google_query_results[i][6] == slug else 'none'
for i in range(len(google_query_results))]
google_search_results2 = []
for i in range(len(google_search_results)):
try:
if google_search_results[i] != 'none':
google_search_results2.append(google_search_results[i])
except IndexError:
pass
context = {
'numbers': numbers,
'google_search_results': google_search_results2,
'dev_name_for_site': dev_name_for_site,
'dev_img_for_site': dev_img_for_site,
'g_dev_datetime': g_dev_datetime,
}
return render(request, "homepage/dev_iter.html", context)
|
363819f854e26b8c62f8fe41fbfbf2e64296246f
| 18,901 |
def dpuEnableTaskProfile(task):
"""
Enable profiling facility of DPU Task while running to get its performance metrics
task: DPU Task. This parameter should be gotten from the result of dpuCreatTask()
Returns: 0 on success, or report error in case of any failure
"""
return pyc_libn2cube.pyc_dpuEnableTaskProfile(task)
|
5bb1435ca194b214695891d451f2f56a4cdf6857
| 18,902 |
def get_isotopic_distribution(z):
"""
For an element with number ``z``, returns two ``np.ndarray`` objects containing that element's weights and relative abundances.
Args:
z (int): atomic number
Returns:
masses (np.ndarray): list of isotope masses
weights (np.ndarray): list of weights (relative to 1.00 for largest)
"""
z = str(z)
masses = list(ISOTOPE_DICTIONARY[z].keys())
weights = list(ISOTOPE_DICTIONARY[z].values())
return np.array(masses), np.array(weights)
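
# Small usage sketch, assuming the module-level ISOTOPE_DICTIONARY and the numpy
# import (np) from the original file: abundance-weighted average mass of carbon.
masses, weights = get_isotopic_distribution(6)
print(np.average(masses, weights=weights))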
|
4b038319c37dfd13f0ef085c2b3286f6fc2749c3
| 18,903 |
def url_root():
"""根路径"""
return """
<p>Hello ! Welcome to Rabbit's WebServer Platform !</p>
<a href="http://www.miibeian.gov.cn/" target="_blank" style="">京ICP备 18018365 号</a> @2018Rabbit
"""
|
2e6d1d5301ac67bdec30cdeeaeed3c8638568de9
| 18,904 |
import uuid
def CreateMatrix(args, context, history_id, gcs_results_root, release_track):
"""Creates a new iOS matrix test in Firebase Test Lab from the user's params.
Args:
args: an argparse namespace. All the arguments that were provided to this
gcloud command invocation (i.e. group and command arguments combined).
context: {str:obj} dict containing the gcloud command context, which
includes the Testing API client+messages libs generated by Apitools.
history_id: {str} A history ID to publish Tool Results to.
gcs_results_root: the root dir for a matrix within the GCS results bucket.
release_track: the release track that the command is invoked from.
Returns:
A TestMatrix object created from the supplied matrix configuration values.
"""
creator = MatrixCreator(args, context, history_id, gcs_results_root,
release_track)
return creator.CreateTestMatrix(uuid.uuid4().hex)
|
e536001e768f2574d6c5d773b70e6b4e58c6c3da
| 18,905 |
import os
import requests
from pathlib import Path
from uuid import uuid4
from zipfile import ZipFile
def _download_and_extract_zip_from_github(site_info):
"""
from https://pelican-blog/archive/master.zip
to: /path/pelican-blog
"""
unique_id = uuid4().hex
zip_file_url = site_info["ZIP_URL"]
zip_file_name = os.path.join(
settings.PELICAN_PUBLISHER["WORKING_ROOT"],
"{}-{}.zip".format(site_info["NAME"], unique_id),
)
site_stage_path = os.path.join(
settings.PELICAN_PUBLISHER["WORKING_ROOT"],
"{}-{}".format(site_info["NAME"], unique_id),
)
logger.debug("zip file name: {}".format(zip_file_name))
logger.debug("site stage path: {}".format(site_stage_path))
# download zip file
r = requests.get(zip_file_url)
if not r.ok:
logger.error("download failed")
return None, None
    with open(zip_file_name, "wb") as zip_file:
        zip_file.write(r.content)
logger.info("download finished")
# extract zip file
ZipFile(zip_file_name).extractall(site_stage_path)
# pelican site file check
site_file_path = None
for p in Path(site_stage_path).glob("*"):
if p.is_dir():
site_file_path = p.as_posix()
break
if site_file_path is None:
logger.error("pelican site file extract failed, more subdir")
return None, None
logger.info("extracted pelican site file to: {}".format(site_file_path))
return site_stage_path, site_file_path
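
# Illustrative call (the dictionary keys are the ones this helper reads; the NAME
# and ZIP_URL values are hypothetical, and settings.PELICAN_PUBLISHER plus the
# module-level logger are assumed to be configured as in the original project):
site_info = {
    "NAME": "pelican-blog",
    "ZIP_URL": "https://github.com/example-user/pelican-blog/archive/master.zip",
}
site_stage_path, site_file_path = _download_and_extract_zip_from_github(site_info)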
|
2a6e90434d8cfd1cd1e2084ff3ef575d725f87e4
| 18,906 |
def map_keys(func, d):
""" Returns a new dict with func applied to keys from d, while values
remain unchanged.
>>> D = {'a': 1, 'b': 2}
>>> map_keys(lambda k: k.upper(), D)
{'A': 1, 'B': 2}
>>> assert map_keys(identity, D) == D
>>> map_keys(identity, {})
{}
"""
    return dict((func(k), v) for k, v in d.items())
|
5e9798d208db5e43dad497d64a4b8e469c67eb3b
| 18,907 |
from qiniu import Auth
def generate_qiniu_token(object_name, use_type, expire_time=600):
"""
    Generate the token required for uploading to Qiniu Cloud.
    :param object_name: file name under which the upload is stored on Qiniu
    :param use_type: operation type
    :param expire_time: token expiry time in seconds; defaults to 600 (ten minutes)
:return:
"""
bucket_name = PRIVATE_QINIU_BUCKET_NAME
    # Fill in your Access Key and Secret Key
access_key = PRIVATE_QINIU_ACCESS_KEY
secret_key = PRIVATE_QINIU_SECRET_KEY
    # Build the authentication object
q = Auth(access_key, secret_key)
    # Example upload policy
# https://developer.qiniu.com/kodo/manual/1206/put-policy
policy = {
# 'callbackUrl':'https://requestb.in/1c7q2d31',
# 'callbackBody':'filename=$(fname)&filesize=$(fsize)'
# 'persistentOps':'imageView2/1/w/200/h/200'
}
token = q.upload_token(bucket_name, object_name, expire_time, policy)
base_url = PRIVATE_MEDIA_URL_PREFIX
return (object_name, token, base_url, expire_time)
|
9d0b65fb08032ad557f50cb73c00b4ed0f8eae5a
| 18,908 |
def get_s3_object(bucket, key_name, local_file):
"""Download a S3 object to a local file in the execution environment
Parameters
----------
bucket: string, required
S3 bucket that holds the message
key: string, required
S3 key is the email object
Returns
-------
email_msg: email.message.Message object
"""
tracer.put_metadata('object', f's3://{bucket}/{key_name}')
try:
s3_resource.Bucket(bucket).download_file(key_name, local_file)
result = 'ok'
tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')
except Exception as e:
tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')
result = f'Error: {str(e)}'
    return result
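
# Hedged sketch of the module-level objects this handler relies on (they are
# defined elsewhere in the original file); the Tracer service name is hypothetical.
import boto3
from aws_lambda_powertools import Tracer

s3_resource = boto3.resource("s3")
tracer = Tracer(service="email-processor")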
|
02b10623e30eff1ee5093d4e0f1ee51b3b97d0ac
| 18,909 |
def jaxpr_eqns_input_sizes(jaxpr) -> np.ndarray:
"""Return a list of input sizes for each equation in the jaxpr.
Args:
jaxpr: Jaxpr to get input sizes for.
Returns:
        A (#eqns + 1) x (#eqns + 1) numpy array of input sizes. input_sizes[l, r]
        represents the total input size of the l-th to (r - 1)-th equations in the jaxpr.
"""
length = len(jaxpr.eqns)
input_sizes = np.full((length + 1, length + 1), 0, dtype=np.float32)
outvars = OrderedSet()
for k in range(0, length + 1):
if k > 0:
outvars = outvars.union(jaxpr.eqns[k - 1].outvars)
invars = OrderedSet()
total_size = 0
for r in range(k + 1, length + 1):
for invar in jaxpr.eqns[r - 1].invars:
if (isinstance(invar, Var) and invar in outvars and
invar not in invars):
invars.add(invar)
total_size += invar.aval.size * invar.aval.dtype.itemsize
input_sizes[k, r] = total_size
return input_sizes
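
# Usage sketch: build a jaxpr with jax.make_jaxpr and inspect the pairwise input
# sizes. This assumes the module-level helpers used above (OrderedSet, Var, np) are
# importable as in the original file; the toy function is arbitrary.
import jax
import jax.numpy as jnp

closed_jaxpr = jax.make_jaxpr(lambda x: jnp.sin(x) * 2 + x)(jnp.ones(4))
sizes = jaxpr_eqns_input_sizes(closed_jaxpr.jaxpr)
print(sizes.shape)  # (#eqns + 1, #eqns + 1)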
|
0209c0342725ae83ea8051ef47852134e6ad4502
| 18,910 |
def extract_message(raw_html):
"""Returns the content of the message element.
This element appears typically on pages with errors.
:param raw_html: Dump from any page.
"""
results = re_message.findall(raw_html)
if results:
return results[0]
return None
|
498ee1c38c08db365b1bf91ecd32a79c2d2f5f68
| 18,911 |
from typing import Callable
from typing import Tuple
import jax
import jax.numpy as jnp
import numpy as np
def _weighted_essentially_non_oscillatory_vectorized(
eno_order: int, values: Array, spacing: float, boundary_condition: Callable[[Array, int],
Array]) -> Tuple[Array, Array]:
"""Implements a more "vectorized" but ultimately slower version of `weighted_essentially_non_oscillatory`."""
if eno_order < 1:
raise ValueError(f"`eno_order` must be at least 1; got {eno_order}.")
values = boundary_condition(values, eno_order)
diffs = (values[1:] - values[:-1]) / spacing
if eno_order == 1:
return (diffs[:-1], diffs[1:])
substencil_approximations = _align_substencil_values(
jax.vmap(jnp.correlate, (None, 0), 0)(diffs, _diff_coefficients(eno_order)), jnp)
diffs2 = diffs[1:] - diffs[:-1]
chol_T = jnp.asarray(np.linalg.cholesky(_smoothness_indicator_quad_form(eno_order)).swapaxes(-1, -2))
smoothness_indicators = _align_substencil_values(
jnp.sum(jnp.square(jax.vmap(jax.vmap(jnp.correlate, (None, 0), 1), (None, 0), 0)(diffs2, chol_T)), -1), jnp)
unscaled_weights = 1 / jnp.square(smoothness_indicators + WENO_EPS)
unnormalized_weights = (jnp.asarray(_substencil_coefficients(eno_order)[..., np.newaxis]) *
jnp.stack([unscaled_weights[:, :-1], unscaled_weights[:, 1:]]))
weights = unnormalized_weights / jnp.sum(unnormalized_weights, 1, keepdims=True)
return tuple(jnp.sum(jnp.stack([substencil_approximations[:-1], substencil_approximations[1:]]) * weights, 1))
|
debd652ddf02419e191d9d0c5d21640760d3f227
| 18,912 |
def defaults(dictionary, overwriteNone=False, **kwargs):
"""
Set default values of a given dictionary, option to overwrite None values.
Returns given dictionary with values updated by kwargs unless they already existed.
:param dict dictionary:
:param overwriteNone: Whether to overwrite None values.
:param kwargs:
"""
for key, value in dictionary.items():
dictValueIsNone = value is None
kwargsHasValue = key in kwargs
if overwriteNone and dictValueIsNone and kwargsHasValue:
continue
# Overwrite kwargs with dictionary
kwargs[key] = value
return kwargs
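
# Worked example (illustration only): dictionary values win unless the value is
# None and overwriteNone is set, in which case the keyword value is kept.
merged = defaults({'a': None, 'b': 2}, overwriteNone=True, a=1, c=3)
assert merged == {'a': 1, 'b': 2, 'c': 3}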
|
6def5bb71839b3b627a5597ea6fa7fa1b48e463b
| 18,913 |
from typing import Union
from typing import Optional
from typing import Dict
import numpy as np
from tqdm import tqdm
def expected_average_shortest_distance_to_miner(
crawl_graph: Union[
ProbabilisticWeightedCrawlGraph[CrawledNode], CrawlGraph[CrawledNode]
],
distances: Optional[np.ndarray] = None,
miner_probability: Optional[Dict[CrawledNode, float]] = None,
) -> Dict[CrawledNode, float]:
"""Estimates the average shortest distance to a miner for each node in the graph"""
if not isinstance(crawl_graph, ProbabilisticWeightedCrawlGraph):
crawl_graph = ProbabilisticWeightedCrawlGraph(crawl_graph)
if miner_probability is None:
miner_probability = estimate_miner_probability(crawl_graph)
if distances is None:
distances = crawl_graph.probabilistic_shortest_distances()
elif (
distances.ndim != 2
or distances.shape[0] != len(crawl_graph)
or distances.shape[1] != len(crawl_graph)
):
raise ValueError(
f"distances is expected to be an {len(crawl_graph)}x{len(crawl_graph)} matrix"
)
return {
node: sum(
distances[index][i] * miner_probability[crawl_graph.nodes[i]]
for i in range(len(crawl_graph))
)
for node, index in tqdm(
((n, crawl_graph.node_indexes[n]) for n in crawl_graph),
desc="calculating expected distance to miners",
leave=False,
unit=" nodes",
total=len(crawl_graph),
)
}
|
6ea56881dce6d589eebec6422a0a5ffae41fe153
| 18,914 |
from typing import Callable
def dummy_state_sb(dummy_state: State, dummy_train_dataloader: DataLoader, conv_model: MosaicClassifier,
loss_fun_tuple: Callable, epoch: int, batch: int) -> State:
"""Dummy state with required values set for Selective Backprop
"""
dummy_state.train_dataloader = dummy_train_dataloader
dummy_state.epoch = epoch
dummy_state.step = epoch * dummy_state.steps_per_epoch + batch
dummy_state.model = conv_model
dummy_state.model.module.loss = loss_fun_tuple
return dummy_state
|
4f4af7193ccf0a4fb883a7d4b42ef58da49333b3
| 18,915 |
import libsbml as sbml
def create_model(species={}, parameters={}, reactions={}, events={}):
"""Returns an SBML Level 3 model.
Example:
species = { 'E': 1, \
'EM': 0, \
'EM2': 0, \
'F': 100, \
}
parameters = {'k': (1e-06,'per_min'), \
}
reactions = { 'Production_E': \
{ 're': [(1,'E'),(1,'F')], \
'pr': [(2,'E')], \
'kin' : 'k * E * F' \
}, \
}
events = {'e': \
{ 'trigger': 'true', \
'delay': '10', \
'assignments': [('M','1'),], \
}, \
}
"""
# Create an empty SBMLDocument object. It's a good idea to check for
# possible errors. Even when the parameter values are hardwired like
# this, it is still possible for a failure to occur (e.g., if the
# operating system runs out of memory).
try:
document = sbml.SBMLDocument(3, 1)
except ValueError:
raise RuntimeError("Could not create SBMLDocumention object")
# Create the basic Model object inside the SBMLDocument object.
model = document.createModel()
check(model, "create model")
check(model.setTimeUnits("second"), "set model-wide time units")
check(model.setExtentUnits("item"), "set model units of extent")
check(
model.setSubstanceUnits("item"), "set model substance units"
) # mole, item, gram, kilogram, dimensionless
# Create a unit definition we will need later.
per_second = model.createUnitDefinition()
check(per_second, "create unit definition")
check(per_second.setId("per_min"), "set unit definition id")
unit = per_second.createUnit()
check(unit, "create unit")
check(unit.setKind(sbml.UNIT_KIND_SECOND), "set unit kind")
check(unit.setExponent(-1), "set unit exponent")
check(unit.setScale(0), "set unit scale")
check(
unit.setMultiplier(1), "set unit multiplier"
)
# Create a compartment inside this model
c1 = model.createCompartment()
check(c1, "create compartment")
check(c1.setId("c1"), "set compartment id")
check(c1.setConstant(True), 'set compartment "constant"')
check(c1.setSize(1), 'set compartment "size"')
check(c1.setSpatialDimensions(3), "set compartment dimensions")
check(
c1.setUnits("dimensionless"), "set compartment size units"
)
# Create species inside this model, set the required attributes
# for each species in SBML Level 3 (which are the 'id', 'compartment',
# 'constant', 'hasOnlySubstanceUnits', and 'boundaryCondition'
# attributes), and initialize the amount of the species along with the
# units of the amount.
for s_str, s_val in species.items():
s = model.createSpecies()
check(s, "create species")
check(s.setId(s_str), "set species id")
check(s.setCompartment("c1"), "set species compartment")
check(s.setConstant(False), 'set "constant" attribute')
check(s.setInitialAmount(float(s_val)), "set initial amount")
check(s.setSubstanceUnits("item"), "set substance units")
check(s.setBoundaryCondition(False), 'set "boundaryCondition"')
check(s.setHasOnlySubstanceUnits(False), 'set "hasOnlySubstanceUnits"')
# Create a parameter object inside this model, set the required
# attributes 'id' and 'constant' for a parameter in SBML Level 3, and
# initialize the parameter with a value along with its units.
for k_str in parameters:
k = model.createParameter()
check(k, "create parameter k")
check(k.setId(k_str), "set parameter id")
check(k.setConstant(True), 'set parameter "constant"')
check(k.setValue(parameters[k_str][0]), "set parameter value")
check(k.setUnits(parameters[k_str][1]), "set parameter units")
# Create a reaction inside this model, set the reactants and products,
# and set the reaction rate expression (the SBML "kinetic law"). We
# set the minimum required attributes for all of these objects. The
# units of the reaction rate are determined from the 'timeUnits' and
# 'extentUnits' attributes on the Model object.
for r_str in reactions:
r = model.createReaction()
check(r, "create reaction")
check(r.setId(r_str), "set reaction id")
check(r.setReversible(False), "set reaction reversibility flag")
check(r.setFast(False), 'set reaction "fast" attribute')
reactants = reactions[r_str]["re"]
for re_val, re_str in reactants:
species_ref = r.createReactant()
check(species_ref, "create reactant")
check(species_ref.setSpecies(re_str), "assign reactant species")
check(species_ref.setStoichiometry(re_val), "set set stoichiometry")
check(species_ref.setConstant(True), 'set "constant" on species')
products = reactions[r_str]["pr"]
for pr_val, pr_str in products:
species_ref = r.createProduct()
check(species_ref, "create product")
check(species_ref.setSpecies(pr_str), "assign product species")
check(species_ref.setStoichiometry(pr_val), "set set stoichiometry")
check(species_ref.setConstant(True), 'set "constant" on species')
math_ast = sbml.parseL3Formula(reactions[r_str]["kin"])
kinetic_law = r.createKineticLaw()
check(math_ast, f"create AST for rate expression")
check(kinetic_law, "create kinetic law")
check(kinetic_law.setMath(math_ast), "set math on kinetic law")
# create events
for e_str in events:
e = model.createEvent()
check(e, "create event")
check(e.setId(e_str), "set id")
        check(e.setUseValuesFromTriggerTime(False), "set useValuesFromTriggerTime")
t = model.createTrigger()
check(t, "create trigger")
check(
t.setMath(sbml.parseL3Formula(events[e_str]["trigger"])),
"set trigger condition",
)
check(t.setPersistent(False), "default not persistent")
check(t.setInitialValue(False), "default not initially true")
check(e.getTrigger().getMath(), 'Problem when creating the trigger condition. The trigger will not work.')
# print( '> ' + sbml.formulaToString(e.getTrigger().getMath()) )
d = model.createDelay()
check(d, "create delay")
check(d.setMath(sbml.parseFormula(events[e_str]["delay"])), "set math")
check(e.setDelay(d), "set delay")
for ass in events[e_str]["assignments"]:
ea = model.createEventAssignment()
check(ea, "check event assignment")
check(ea.setVariable(ass[0]), "set variable")
check(ea.setMath(sbml.parseL3Formula(ass[1])), "set math")
return document
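
# Hedged usage sketch: build a minimal model and serialize it with libsbml. The
# species/parameter/reaction values are toy numbers, and the `check` status helper
# used throughout is assumed to be defined elsewhere in this module.
doc = create_model(
    species={"E": 1, "F": 100},
    parameters={"k": (1e-06, "per_min")},
    reactions={"Production_E": {"re": [(1, "E"), (1, "F")],
                                "pr": [(2, "E")],
                                "kin": "k * E * F"}},
)
print(sbml.writeSBMLToString(doc))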
|
1950509f83b858ef7829aa6f30caaa3734ff2946
| 18,916 |
def get_network_connection_query(endpoint_ids: str, args: dict) -> str:
"""Create the network connection query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
remote_ip_list = args.get('remote_ip', '')
if not remote_ip_list:
raise DemistoException('Please provide a remote_ip argument.')
remote_ip_list = wrap_list_items_in_double_quotes(remote_ip_list)
local_ip_filter = ''
if args.get('local_ip'):
local_ip_list = wrap_list_items_in_double_quotes(args.get('local_ip', ''))
local_ip_filter = f'and action_local_ip in({local_ip_list})'
port_list = args.get('port')
port_list_filter = f'and action_remote_port in({port_list})' if port_list else ''
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = STORY
{local_ip_filter} and action_remote_ip in({remote_ip_list}) {port_list_filter}|
fields agent_hostname, agent_ip_addresses, agent_id, actor_effective_username, action_local_ip, action_remote_ip,
action_remote_port, dst_action_external_hostname, action_country, actor_process_image_name, actor_process_image_path,
actor_process_command_line, actor_process_image_sha256, actor_process_instance_id, actor_process_causality_id'''
|
6390c6ae4436632055fb90687e51cfac2ca09a05
| 18,917 |
import json
def dump_into_json(filename, metrics):
"""Dump the metrics dictionary into a JSON file
It will automatically dump the dictionary:
metrics = {'duration': duration,
'voltage_extremes': voltage_extremes,
'num_beats': num_beats,
'mean_hr_bpm': mean_hr_bpm,
'beats': beats}.
in to a JSON file with the file name as the data file name.
:param filename: name of the file being read
:param metrics: a dictionary containing duration,
voltage extremes, number of beats, beats per minute,
and the time where beats occur
:returns:
- successful_JSON - test if it has successfully create JSON
"""
    successful_JSON = False
    try:
        with open(filename + '.json', 'w') as output_file:
            json.dump(metrics, output_file)
        successful_JSON = True
    except TypeError:
        print("Unsuccessfully output JSON file")
    return successful_JSON
|
2e6effbcefe7cb3033c4c472cbee3850c00ae06b
| 18,918 |
def _costfun(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
"""
Compute residuals.
`params` contains camera parameters and 3-D coordinates.
"""
if isinstance(params, (tuple, list)):
params = np.array(params)
params = np.hstack((pose0, params))
poses, pts3d = _unpack(params, n_cams, n_pts if len(fixed_pt3d) == 0 else 0)
points_3d = fixed_pt3d if len(pts3d) == 0 else pts3d
points_proj = _project(points_3d[pt3d_idxs], poses[cam_idxs], K)
px_err = ((pts2d - points_proj) / px_err_sd[:, None]).ravel()
return px_err
|
3e97e7d14712fe8b89b60de958fd743c728e8cba
| 18,919 |
def dbg_get_memory_info(*args):
"""
dbg_get_memory_info() -> PyObject *
This function returns the memory configuration of a debugged process.
@return:
None if no debugger is active
tuple(start_ea, end_ea, name, sclass, sbase, bitness, perm)
"""
return _ida_idd.dbg_get_memory_info(*args)
|
f79234d724f33eb8311aff8b6b04ddb9325953c0
| 18,920 |
import base64
import requests
def get_headers(base_url: str, client_id: str, client_secret: str, grant_type: str, verify: bool):
"""
Create header with OAuth 2.0 authentication information.
:type base_url: ``str``
:param base_url: Base URL of the IdentityIQ tenant.
:type client_id: ``str``
:param client_id: Client Id for OAuth 2.0.
:type client_secret: ``str``
:param client_secret: Client Secret for OAuth 2.0.
:type grant_type: ``str``
:param grant_type: Grant Type for OAuth 2.0. Defaulted to 'client_credentials' if not provided.
:return: Header with OAuth 2.0 information if client_id & client_secret are provided, else None.
This will return None if the client_id & client_secret were not valid (authorized).
"""
if base_url is None or client_id is None or client_secret is None:
return None
if grant_type is None:
grant_type = 'client_credentials'
auth_cred = client_id + ':' + client_secret
iiq_oauth_body = f'grant_type={grant_type}'
iiq_oauth_headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % base64.b64encode(auth_cred.encode()).decode()
}
oauth_response = requests.request("POST", url=f'{base_url}{IIQ_OAUTH_EXT}', data=iiq_oauth_body,
headers=iiq_oauth_headers, verify=verify)
if oauth_response is not None and 200 <= oauth_response.status_code < 300:
return {
'Authorization': 'Bearer %s' % oauth_response.json().get('access_token', None),
'Content-Type': 'application/json'
}
else:
err_msg = 'Failed to get response'
if oauth_response is not None:
err_msg += f' {oauth_response.status_code}'
raise DemistoException(err_msg)
|
06ced982595d4abe99e193ec7ab43e366d575f7b
| 18,921 |
import logging
from datetime import datetime
import matplotlib.pyplot as plt
def draw_pie(fracs, labels):
"""
This method is to plot the pie chart of labels, then save it into '/tmp/' folder
"""
logging.info("Drawing the pie chart..")
fig = plt.figure()
plt.pie(fracs, labels=labels, autopct=make_autopct(fracs), shadow=True)
plt.title("Top 10 labels for newly opened issues")
figname = "piechart_{}_{}.png".format(str(datetime.datetime.today().date()),
str(datetime.datetime.today().time()))
fig.savefig("/tmp/{}".format(figname))
pic_path = "/tmp/{}".format(figname)
return pic_path
|
18ee8d0b6054467b9612e282c0d12fa9a10c549b
| 18,922 |
import re
def eval_function_old(param, param_type):
""" Eval Function (Deprecated)
isOwner 0xe982E462b094850F12AF94d21D470e21bE9D0E9C
:param param:
:param param_type:
:return:
"""
try:
splitted_input = param.split(' ')
    except TypeError:
        return None
else:
try:
print(splitted_input)
if len(splitted_input[1][2:]) != 40:
print('launch error, address must be 40 alfanumeric hash')
else:
re.search('0x[0-9,aA-zZ]{40}', splitted_input[1]).group(0)
except IndexError:
print('there is not enough data to verify current input')
pass
return splitted_input[1]
|
6c28fdad6803330bcea8b086cc2e15209125a8d6
| 18,923 |
def _multi_convert(value):
"""
Function try and convert numerical values to numerical types.
"""
try:
value = int(value, 10)
except ValueError:
try:
value = float(value)
except ValueError:
pass
return value
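
# Quick illustration: integer and float strings are converted, anything else is
# returned unchanged.
assert [_multi_convert(v) for v in ("10", "3.5", "abc")] == [10, 3.5, "abc"]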
|
abcd3656fdf5ce7ab1427ee6884a18853bdfaf59
| 18,924 |
def dbinom(n, p):
"""Binomial Distribution
n = number of repetitions
p = success probability
Used when a certain experiment is repeated n times
with a 0 ≤ P ≤ 1 probability to succeed once.
    Returns the probability mass function b(k) of this distribution rather than a single value.
"""
def b(k):
"""Returns the probability of k successes"""
if 0 <= k <= n:
q = 1 - p
return rperm(n, k) * p**k * q**(n-k)
else:
return 0
# Allow accessing the used 'n' and 'p' values from the function
b.__dict__['n'] = n
b.__dict__['p'] = p
b.__dict__['expected'] = n * p
b.__dict__['variance'] = (n * p) * (1-p)
return b
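
# Usage sketch, assuming the module-level rperm(n, k) used above is the
# n-choose-k helper: a fair coin flipped 10 times.
b = dbinom(10, 0.5)
print(b.expected, b.variance)  # 5.0 2.5
print(b(5))                    # ~0.246 if rperm(10, 5) == 252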
|
8917b3eb5ce189094f2b129c596a99d20dfcdcc5
| 18,925 |
import numpy as np
from PIL import Image
def array_to_image(x, data_format='channels_last'):
"""Converts a 3D Numpy array to a PIL Image instance.
Args:
x: Input Numpy array.
data_format: Image data format, either "channels_first" or "channels_last".
Returns:
A PIL Image instance.
Raises:
ValueError: if invalid `x` or `data_format` is passed.
"""
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape: %s' % (x.shape,))
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format: %s' % data_format)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if x.shape[2] == 4:
return Image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
if np.max(x) > 255:
return Image.fromarray(x[:, :, 0].astype('int32'), 'I')
return Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: %s' % (x.shape[2],))
|
2278a317e6d820b9d1aee2d7d796261b14d719f2
| 18,926 |
import time
import logging
import re
from itertools import chain
from multiprocessing import Manager, Process
def worker_process_download_tvtorrent(
tvTorUnit, client = None, maxtime_in_secs = 14400,
num_iters = 1, kill_if_fail = False ):
"""
Used by, e.g., :ref:`get_tv_batch`, to download missing episodes on the Plex_ TV library. Attempts to use the Deluge_ server, specified in :numref:`Seedhost Services Setup`, to download an episode. If successful then uploads the finished episode from the remote SSH server to the Plex_ server and local directory, specified in :numref:`Local and Remote (Seedhost) SSH Setup`.
:param dict tvTorUnit: a :py:class:`dict` representing a summarized magnet link searching operation on an episode. The format and meaning of this data structure is described in :py:meth:`create_tvTorUnits <howdy.tv.tv.create_tvTorUnits>`.
:param DelugeRPC client: optional argument, the `DelugeRPCClient <Deluge RPC client_>`_ object that at a low level uses the Deluge_ server to download the Magnet link at the remote SSH server. If ``None``, then this client is created using :py:meth:`get_deluge_client <howdy.core.core_deluge.get_deluge_client>`.
:param int maxtime_in_secs: optional argument, the maximum time to wait for a Magnet link found by the Jackett_ server to fully download through the Deluge_ server. Must be :math:`\ge 60` seconds. Default is 14400 seconds.
:param int num_iters: optional argument, the maximum number of Magnet links to try and fully download before giving up. The list of Magnet links to try for each missing episode is ordered from *most* seeders + leechers to *least*. Must be :math:`\ge 1`. Default is 1.
:param bool kill_if_fail: optional argument. If ``True``, then on failing operation kill the torrent download on the Deluge_ server and delete any files associated with it. If ``False``, then keep the torrent download on failure.
:returns: If successful, creates a two element :py:class:`tuple`: the first element is the base name of the episode that is uploaded to the Plex_ server, and the second element is a status :py:class:`dictionary <dict>` with three keys.
* the ``status`` is ``"SUCCESS"``.
* the ``message`` describes the final status of the operation.
* the ``time`` tells how long, in seconds, the successful operation took.
If unsuccessful, returns a failing tuple: the first element is ``None``, and the the second element is a status :py:class:`dictionary <dict>` with three keys.
* the ``status`` is ``"FAILURE"``.
* the ``message`` describes the illuminating reason as to how this operation failed.
* the ``time`` tells how long, in seconds, the failing operation took.
:rtype: tuple
.. seealso::
* :ref:`get_tv_batch`.
* :py:meth:`get_remaining_episodes <howdy.tv.tv.get_remaining_episodes>`.
* :py:meth:`create_tvTorUnits <howdy.tv.tv.create_tvTorUnits>`.
* :py:meth:`download_batched_tvtorrent_shows <howdy.tv.tv.download_batched_tvtorrent_shows>`.
.. _`Deluge RPC client`: https://github.com/JohnDoee/deluge-client
.. _Deluge: https://en.wikipedia.org/wiki/Deluge_(software)
"""
time0 = time.time( )
assert( maxtime_in_secs > 0 )
#
if client is None:
client, status = core_deluge.get_deluge_client( )
if client is None:
return None, _create_status_dict(
'FAILURE', 'cannot create or run a valid deluge RPC client.', time0 )
#
## now get list of torrents, choose "top" one
def _process_jackett_items( tvTorUnit, shared_list ):
t0 = time.time( )
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
minSize = tvTorUnit[ 'minSize' ]
maxSize = tvTorUnit[ 'maxSize' ]
minSize_x265 = tvTorUnit[ 'minSize_x265' ]
maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
series_name = tvTorUnit[ 'tvshow' ]
mustHaveString = torFileName.split( )[ -1 ]
do_raw = tvTorUnit[ 'do_raw' ]
logging.info( 'jackett start: %s, %s, %s' % (
torFileName, mustHaveString, series_name ) )
#
## try this twice if it can
        torFileNameAlt = re.sub(r'\(([0-9]+)\)', '', torFileName ).strip( )
torFileNames = [ torFileName, ]
if torFileNameAlt != torFileName: torFileNames.append( torFileNameAlt )
for tfn in torFileNames:
logging.info( 'processing jackett from "%s", using "%s" now, at %0.3f seconds after start.' % (
torFileName, tfn, time.time( ) - time0 ) )
data, status = get_tv_torrent_jackett(
tfn, maxnum = 100, keywords = [ 'x264', 'x265', '720p' ],
minsizes = [ minSize, minSize_x265 ],
maxsizes = [ maxSize, maxSize_x265 ],
keywords_exc = [ 'xvid' ], raw = do_raw,
must_have = [ mustHaveString ] )
if status == 'SUCCESS': break
if status != 'SUCCESS':
shared_list.append( ( 'jackett', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
return
logging.info( 'successfully processed jackett on %s in %0.3f seconds.' % (
torFileName, time.time( ) - t0 ) )
shared_list.append( ( 'jackett', data, 'SUCCESS' ) )
#
def _process_eztv_io_items( tvTorUnit, shared_list ):
t0 = time.time( )
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
minSize = tvTorUnit[ 'minSize' ]
maxSize = tvTorUnit[ 'maxSize' ]
minSize_x265 = tvTorUnit[ 'minSize_x265' ]
maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
series_name = tvTorUnit[ 'tvshow' ]
mustHaveString = torFileName.split( )[ -1 ]
logging.info( 'eztv.io start: %s' % torFileName )
#
data, status = get_tv_torrent_eztv_io(
torFileName, maxnum = 100, series_name = series_name,
minsizes = [ minSize, minSize_x265],
maxsizes = [ maxSize, maxSize_x265] )
if status != 'SUCCESS':
shared_list.append(
( 'eztv.io', _create_status_dict( 'FAILURE', status, time0 ), 'FAILURE' ) )
return
data_filt = list(filter(
lambda elem: any(map(lambda tok: tok in elem['title'].lower( ),
( 'x264', 'x265', '720p' ) ) ) and
'xvid' not in elem['title'].lower( ), data ) )
if len( data_filt ) == 0:
shared_list.append(
( 'eztv.io', _create_status_dict(
'FAILURE', 'ERROR, COULD NOT FIND %s IN EZTV.IO.' % torFileName, t0 ), 'FAILURE' ) )
return
logging.info( 'successfully processed eztv.io on %s in %0.3f seconds.' % (
torFileName, time.time( ) - t0 ) )
shared_list.append( ( 'eztv.io', data_filt, 'SUCCESS' ) )
#
def _process_zooqle_items( tvTorUnit, shared_list ):
t0 = time.time( )
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
minSize = tvTorUnit[ 'minSize' ]
maxSize = tvTorUnit[ 'maxSize' ]
minSize_x265 = tvTorUnit[ 'minSize_x265' ]
maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
series_name = tvTorUnit[ 'tvshow' ]
mustHaveString = torFileName.split( )[ -1 ]
logging.info( 'zooqle start: %s' % torFileName )
#
data, status = get_tv_torrent_zooqle( torFileName, maxnum = 100 )
if status != 'SUCCESS':
shared_list.append( ( 'zooqle', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
return
data_filt = list(filter(
lambda elem: any(map(lambda tok: tok in elem['title'].lower( ),
( 'x264', 'x265', '720p' ) ) ) and
'xvid' not in elem['title'].lower( ) and
elem['torrent_size'] >= minSize*1e6 and
elem['torrent_size'] <= maxSize*1e6, data ) )
        if len( data_filt ) == 0:
            shared_list.append(
                ( 'zooqle', _create_status_dict(
                    'FAILURE', 'ERROR, COULD NOT FIND %s IN ZOOQLE.' % torFileName, t0 ), 'FAILURE' ) )
            return
logging.info( 'successfully processed zooqle on %s in %0.3f seconds.' % (
torFileName, time.time( ) - t0 ) )
shared_list.append( ( 'zooqle', data_filt, 'SUCCESS' ) )
m = Manager( )
shared_list = m.list( )
jobs = [ ]
for targ in ( _process_jackett_items, _process_eztv_io_items, _process_zooqle_items ):
job = Process( target = targ, args = ( tvTorUnit, shared_list ) )
job.daemon = False
jobs.append( job )
job.start( )
for job in jobs: job.join( )
for job in jobs: job.close( )
#shared_list = list(map(
# lambda proc: proc( tvTorUnit ),
# ( _process_jackett_items, _process_eztv_io_items, _process_zooqle_items ) ) )
error_tup = list(map(
lambda dat: ( dat[0], dat[1] ), filter(lambda dat: dat[-1] == 'FAILURE', shared_list ) ) )
data = list( chain.from_iterable( map(lambda dat: dat[1],
filter(lambda dat: dat[-1] == 'SUCCESS', shared_list ) ) ) )
#
## status of downloaded elements
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
if len( data ) == 0:
return None, dict( error_tup )
print( 'got %d candidates for %s in %0.3f seconds.' % (
len(data), torFileName, time.time( ) - time0 ) )
#
## wrapped away in another method
return _worker_process_tvtorrents(
client, data, torFileName, totFname,
maxtime_in_secs, num_iters, kill_if_fail )
|
899b13f70d1673168eab4b533ce7e5219d25d365
| 18,927 |
from operator import or_
from datetime import timezone
import dateparser
def find_fixture(
gameweek,
team,
was_home=None,
other_team=None,
kickoff_time=None,
season=CURRENT_SEASON,
dbsession=session,
):
"""Get a fixture given a gameweek, team and optionally whether
the team was at home or away, the kickoff time and the other team in the
fixture.
"""
fixture = None
if not isinstance(team, str):
team_name = get_team_name(team, season=season, dbsession=dbsession)
else:
team_name = team
if not team_name:
raise ValueError("No team with id {} in {} season".format(team, season))
if other_team and not isinstance(other_team, str):
other_team_name = get_team_name(other_team, season=season, dbsession=dbsession)
else:
other_team_name = other_team
query = (
dbsession.query(Fixture).filter_by(gameweek=gameweek).filter_by(season=season)
)
if was_home is True:
query = query.filter_by(home_team=team_name)
elif was_home is False:
query = query.filter_by(away_team=team_name)
elif was_home is None:
query = query.filter(
or_(Fixture.away_team == team_name, Fixture.home_team == team_name)
)
else:
raise ValueError("was_home must be True, False or None")
if other_team_name:
if was_home is True:
query = query.filter_by(away_team=other_team_name)
elif was_home is False:
query = query.filter_by(home_team=other_team_name)
elif was_home is None:
query = query.filter(
or_(
Fixture.away_team == other_team_name,
Fixture.home_team == other_team_name,
)
)
fixtures = query.all()
if not fixtures or len(fixtures) == 0:
raise ValueError(
"No fixture with season={}, gw={}, team_name={}, was_home={}, other_team_name={}".format(
season, gameweek, team_name, was_home, other_team_name
)
)
if len(fixtures) == 1:
fixture = fixtures[0]
elif kickoff_time:
# team played multiple games in the gameweek, determine the
# fixture of interest using the kickoff time,
kickoff_date = dateparser.parse(kickoff_time)
kickoff_date = kickoff_date.replace(tzinfo=timezone.utc)
kickoff_date = kickoff_date.date()
for f in fixtures:
f_date = dateparser.parse(f.date)
f_date = f_date.replace(tzinfo=timezone.utc)
f_date = f_date.date()
if f_date == kickoff_date:
fixture = f
break
if not fixture:
raise ValueError(
"No unique fixture with season={}, gw={}, team_name={}, was_home={}, kickoff_time={}".format(
season, gameweek, team_name, was_home, kickoff_time
)
)
return fixture
|
fcf90acd4fd8dd663c5cdf2ec99bd428c8cf7a45
| 18,928 |
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
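
# Example: collect whichever of these error numbers exist on the current platform
# (WSAECONNRESET is Windows-only and is simply skipped elsewhere).
print(plat_specific_errors("EPIPE", "EBADF", "WSAECONNRESET"))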
|
acb70b2b7d6b16fbe2cfc9f559606efd504b8e3f
| 18,929 |
def fft(array, nfft=None, dim=None, dx=None, detrend=None, tapering=False,
shift=True, sym=False, chunks=None):
"""Compute the spectrum on several dimensions of xarray.DataArray objects
using the Fast Fourrier Transform parrallelized with dask.
Parameters
----------
array : xarray.DataArray
Array from which compute the spectrum
dim : str or sequence
Dimensions along which to compute the spectrum
dx : float or sequence, optional
Define the resolution of the dimensions. If not precised,
the resolution is computed directly from the coordinates associated
to the dimensions.
detrend : {None, 'mean', 'linear'}, optional
Remove the mean or a linear trend before the spectrum computation
tapering : bool, optional
If True, tapper the data with a Tukey window
shift : bool, optional
If True, the frequency axes are shifted to center the 0 frequency,
otherwise negative frequencies follow positive frequencies as in
numpy.fft.ftt
sym : bool, optional
If True, force the spectrum to be symmetrical even if the input data
is real
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``
Returns
-------
res : DataArray
A multi-dimensional complex DataArray with the corresponding
dimensions transformed in the Fourier space.
Notes
-----
If the input data is real, a real fft is performed over the first
dimension, which is faster. Then the transform over the remaining
dimensions are computed with the classic fft.
"""
temp_nfft, new_dim = _utils.infer_n_and_dims(array, nfft, dim)
new_nfft = _utils.infer_arg(temp_nfft, dim)
new_dx = _utils.infer_arg(dx, dim)
    if detrend == 'mean':
# Tackling the issue of the dask graph by computing and loading the
# mean here
for di in new_dim:
mean_array = array.mean(dim=di).load()
preproc_array = array - mean_array
    elif detrend == 'linear':
preproc_array = _detrend(array, new_dim)
else:
preproc_array = array
if tapering:
preproc_array = _tapper(array, new_dim)
# TODO: Check if this part may work with dask using np.iscomplexobj
# If the array is complex, set the symmetry parameters to True
if np.any(np.iscomplex(array)):
sym = True
spectrum_array, spectrum_coords, spectrum_dims = \
_fft(preproc_array, new_nfft, new_dim, new_dx, shift=shift,
chunks=chunks, sym=sym)
spec = xr.DataArray(spectrum_array, coords=spectrum_coords,
dims=spectrum_dims, name='spectrum')
_compute_norm_factor(spec, new_nfft, new_dim, new_dx, tapering, sym=sym)
return spec
|
dd672487df9f4988c53706a2886f499e56a5ff4c
| 18,930 |
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
from typing import Union
def make_symmetric_matrix(d: Union[list, float]) -> list:
"""
    d (list or float):
        float: isotropic value placed on the diagonal (cubic system)
        len(d) == 1: cubic system
        len(d) == 3: tetragonal or orthorhombic system
        len(d) == 6: upper-triangular components of any other system
        len(d) == 9: full 3x3 matrix given row by row
"""
if isinstance(d, float):
tensor = [[d, 0, 0], [0, d, 0], [0, 0, d]]
elif len(d) == 9:
tensor = [[d[0], d[1], d[2]], [d[3], d[4], d[5]], [d[6], d[7], d[8]]]
elif len(d) == 1:
tensor = [[d[0], 0, 0], [0, d[0], 0], [0, 0, d[0]]]
elif len(d) == 3:
tensor = [[d[0], 0, 0], [0, d[1], 0], [0, 0, d[2]]]
elif len(d) == 6:
"""
Given a symmetric matrix in upper triangular matrix form as flat array
indexes as:
[A_xx, A_yy, A_zz, A_xy, A_xz, A_yz]
This will generate the full matrix:
[[A_xx, A_xy, A_xz], [A_xy, A_yy, A_yz], [A_xz, A_yz, A_zz]
"""
tensor = make_symmetric_matrix_from_upper_tri(d).tolist()
else:
raise ValueError("{} is not valid to make symmetric matrix".format(d))
return tensor
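
# Quick illustration of the diagonal cases (float and three-component input); the
# six-component branch relies on the pymatgen helper imported above.
assert make_symmetric_matrix(2.0) == [[2.0, 0, 0], [0, 2.0, 0], [0, 0, 2.0]]
assert make_symmetric_matrix([1.0, 2.0, 3.0]) == [[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]]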
|
318caf380a8f0a0878eac54bae49c86722e532bb
| 18,931 |
def convert_lds_to_block_tridiag(As, bs, Qi_sqrts, ms, Ri_sqrts):
"""
Parameterize the LDS in terms of pairwise linear Gaussian dynamics
and per-timestep Gaussian observations.
p(x_{1:T}; theta)
= [prod_{t=1}^{T-1} N(x_{t+1} | A_t x_t + b_t, Q_t)]
* [prod_{t=1}^T N(x_t | m_t, R_t)]
We can rewrite this as a Gaussian with a block tridiagonal precision
matrix J. The blocks of this matrix are:
J_{t,t} = A_t.T Q_t^{-1} A_t + Q_{t-1}^{-1} + R_t^{-1}
J_{t,t+1} = -Q_t^{-1} A_t
The linear term is h_t
h_t = -A_t.T Q_t^{-1} b_t + Q_{t-1}^{-1} b_{t-1} + R_t^{-1} m_t
We parameterize the model in terms of
theta = {A_t, b_t, Q_t^{-1/2}}_{t=1}^{T-1}, {m_t, R_t^{-1/2}}_{t=1}^T
"""
T, D = ms.shape
assert As.shape == (T-1, D, D)
assert bs.shape == (T-1, D)
assert Qi_sqrts.shape == (T-1, D, D)
assert Ri_sqrts.shape == (T, D, D)
    # Construct the inverse covariance matrices
Qis = np.matmul(Qi_sqrts, np.swapaxes(Qi_sqrts, -1, -2))
Ris = np.matmul(Ri_sqrts, np.swapaxes(Ri_sqrts, -1, -2))
# Construct the joint, block-tridiagonal precision matrix
J_lower_diag = -np.matmul(Qis, As)
J_diag = np.concatenate([-np.matmul(np.swapaxes(As, -1, -2), J_lower_diag), np.zeros((1, D, D))]) \
+ np.concatenate([np.zeros((1, D, D)), Qis]) \
+ Ris
# Construct the linear term
h = np.concatenate([np.matmul(J_lower_diag, bs[:, :, None])[:, :, 0], np.zeros((1, D))]) \
+ np.concatenate([np.zeros((1, D)), np.matmul(Qis, bs[:, :, None])[:, :, 0]]) \
+ np.matmul(Ris, ms[:, :, None])[:, :, 0]
return J_diag, J_lower_diag, h
|
d16721ffb77f06cd55ca3c70238ca56fad76970d
| 18,932 |
def extract_string_from_tensor(input_ids, mode="single", config=None, tokenizer=None):
"""
Args:
input_ids (Tensor): input sentences with shape [batch_size, seq_len].
mode (str): ["pair", "single"]
"pair" for tasks with paired inputs `<bos> A <eos> B <eos>`,
such as summarization task, the dataset format `<bos> Article <eos> Summary <eos>`,
reading comprehension task, the dataset format `<bos> Passage Question <eos> Answer <eos>`.
"single" for tasks with single input `<bos> A <eos>`, such as Language Modeling, Lambada task.
config: the configuration of GPT-2 model.
tokenizer: the tokenizer of GPT-2 model.
Return:
prompt_list (list): list of prompt_text
reference_list (list): list of reference_text, or second part of text
rest_list (list): list of rest_text, or rest part of text
"""
batch_size = config.batch_size
seq_length = config.seq_length
prompt_list = [""] * batch_size
reference_list = [""] * batch_size
eos_text = tokenizer.eos_token
len_eos_text = len(eos_text)
input_ids_np = input_ids.asnumpy()
input_ids_np = input_ids_np.reshape((batch_size, seq_length))
# input_ids = P.Reshape()(input_ids, (batch_size, seq_length))
if mode == "pair":
for batch_idx in range(batch_size):
sentence_tensor = input_ids_np[batch_idx]
            sentence_list = sentence_tensor.tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
reference_start = prompt_end + len_eos_text
reference_end = sentence[reference_start:].find(
eos_text, 0) + reference_start
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
reference_list[batch_idx] = sentence[reference_start:reference_end]
return prompt_list, reference_list
# For single output datasets such as WikiText, etc.
if mode == "single":
for batch_idx in range(batch_size):
sentence_tensor = input_ids_np[batch_idx]
            sentence_list = sentence_tensor.tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
else:
raise NotImplementedError('mode:{} not supported.'.format(mode))
return prompt_list
|
27cf8905350db53ec908f3b8ef8674a7ac3a17eb
| 18,933 |
def schema_validation_matching(source_fields, target_fields):
"""Compare schemas between two dictionary objects"""
results = []
# Go through each source and check if target exists and matches
for source_field_name, source_field_type in source_fields.items():
# target field exists
if source_field_name in target_fields:
# target data type matches
if source_field_type == target_fields[source_field_name]:
results.append(
[
source_field_name,
source_field_name,
"1",
"1",
consts.VALIDATION_STATUS_SUCCESS,
"Source_type:{} Target_type:{}".format(
source_field_type, target_fields[source_field_name]
),
]
)
# target data type mismatch
else:
results.append(
[
source_field_name,
source_field_name,
"1",
"1",
consts.VALIDATION_STATUS_FAIL,
"Data type mismatch between source and target. Source_type:{} Target_type:{}".format(
source_field_type, target_fields[source_field_name]
),
]
)
# target field doesn't exist
else:
results.append(
[
source_field_name,
"N/A",
"1",
"0",
consts.VALIDATION_STATUS_FAIL,
"Target doesn't have a matching field name",
]
)
# source field doesn't exist
for target_field_name, target_field_type in target_fields.items():
if target_field_name not in source_fields:
results.append(
[
"N/A",
target_field_name,
"0",
"1",
consts.VALIDATION_STATUS_FAIL,
"Source doesn't have a matching field name",
]
)
return results
|
7af82c39462de09e326c6f4413a2d6be7fd6c977
| 18,934 |
import pkg_resources
def find_thirdparty_marshaller_plugins():
""" Find, but don't load, all third party marshaller plugins.
Third party marshaller plugins declare the entry point
``'hdf5storage.marshallers.plugins'`` with the name being the
Marshaller API version and the target being a function that returns
a ``tuple`` or ``list`` of all the marshallers provided by that
plugin when given the hdf5storage version (``str``) as its only
argument.
.. versionadded:: 0.2
Returns
-------
plugins : dict
        The marshaller entry points obtained from third party
plugins. The keys are the Marshaller API versions (``str``) and
the values are ``dict`` of the entry points, with the module
names as the keys (``str``) and the values being the entry
points (``pkg_resources.EntryPoint``).
See Also
--------
supported_marshaller_api_versions
"""
all_plugins = tuple(pkg_resources.iter_entry_points(
'hdf5storage.marshallers.plugins'))
return {ver: {p.module_name: p
for p in all_plugins if p.name == ver}
for ver in supported_marshaller_api_versions()}
|
7aad132f520b67d5b39e857175e4bc006fd3ad72
| 18,935 |
def justTransportResponse(transport):
"""
Helper function for creating a Response which uses the given transport.
All of the other parameters to L{Response.__init__} are filled with
arbitrary values. Only use this method if you don't care about any of
them.
"""
return Response((b'HTTP', 1, 1), 200, b'OK', _boringHeaders, transport)
|
02a18a500cb9a623c287d4e2f3777237e3574ef6
| 18,936 |
import os
def get_tools_location() -> str:
"""Get the path to the Alteryx Python SDK Tools directory."""
admin_path = os.path.join(os.environ["APPDATA"], "Alteryx", "Tools")
user_path = os.path.join(os.environ["PROGRAMDATA"], "Alteryx", "Tools")
if contains_path(__file__, admin_path):
return admin_path
if contains_path(__file__, user_path):
return user_path
raise RuntimeError("Tool is not located in Alteryx install locations.")
|
4ceedb06eac222939384ef5dff34f10652b37525
| 18,937 |
def object_comparator_lookup(src_obj, dst_obj):
"""
    Compare two objects entry by entry
"""
dont_match = []
no_upstream = []
for i in dst_obj:
count_name = 0
count_value = 0
for j in src_obj:
if list(j.keys())[0] == list(i.keys())[0]:
count_name = 1
if j[list(j.keys())[0]] == i[list(i.keys())[0]]:
count_value = 1
if count_name == 0:
if list(i.keys())[0] != "last-modified":
print(i.keys(), list(i.keys())[0])
no_upstream.append(i)
else:
if count_value == 0:
dont_match.append(i)
if no_upstream or dont_match:
return 1
else:
return 0
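# Hedged example of the expected input shape (each entry is a single-key dict):
#
#   src = [{"origin": "RIPE"}, {"source": "a"}]
#   dst = [{"origin": "RIPE"}, {"source": "b"}, {"last-modified": "2020-01-01"}]
#   object_comparator_lookup(src, dst)  # -> 1, because the "source" values differ;
#                                       #    "last-modified" keys are ignored by design.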
|
ba5767624255da915d9c07d25b62880c387f6f00
| 18,938 |
def line(
data_frame=None,
x=None,
y=None,
line_group=None,
color=None,
line_dash=None,
hover_name=None,
hover_data=None,
custom_data=None,
text=None,
facet_row=None,
facet_row_weights=None,
facet_col=None,
facet_col_weights=None,
facet_col_wrap=0,
facet_row_spacing=None,
facet_col_spacing=None,
error_x=None,
error_x_minus=None,
error_y=None,
error_y_minus=None,
animation_frame=None,
animation_group=None,
category_orders=None,
labels=None,
orientation=None,
color_discrete_sequence=None,
color_discrete_map=None,
line_dash_sequence=None,
line_dash_map=None,
log_x=False,
log_y=False,
range_x=None,
range_y=None,
line_shape=None,
render_mode="auto",
title=None,
template=None,
width=None,
height=None,
):
"""
In a 2D line plot, each row of `data_frame` is represented as vertex of
a polyline mark in 2D space.
"""
return make_figure(args=locals(), constructor=go.Scatter)
|
bcedfe2c9297f4d3c049e500265f9ffbc0dde85a
| 18,939 |
def is_primitive(v):
"""
Checks if v is of primitive type.
"""
return isinstance(v, (int, float, bool, str))
|
d22607c0e2b93b82b1da6beb50de68668624dd71
| 18,940 |
def linkify_only_full_urls(attrs, new=False):
"""Linkify only full links, containing the scheme."""
if not new: # This is an existing <a> tag, leave it be.
return attrs
# If the original text doesn't contain the scheme, don't linkify.
if not attrs['_text'].startswith(('http:', 'https:')):
return None
return attrs
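# Hedged usage sketch: this function is shaped like a bleach linkify callback,
# e.g. bleach.linkify("see http://example.com and example.org",
#                     callbacks=[linkify_only_full_urls])
# would turn the first URL into a link and leave "example.org" untouched.
# The exact bleach call is an assumption based on the callback signature.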
|
89fcc7f3fc53353686260779ae8ddb4c0523c57b
| 18,941 |
import os
def retrieve_model_list(opt):
"""
    Retrieve the model information from a directory.
:param opt: parser
:return: list of Checkpoint object.
"""
files = os.listdir(os.path.join(opt.dir, 'models'))
files.sort()
# file name format "address/$NAME_acc_XX.YY_ppl_XX.YY_eZZ.pt"
valid_address = []
for address in files:
name = os.path.basename(address)
step = calc_us_in_name(name) - 5
if valid_model_name(name, opt, step=step):
lst = name.strip().split('_')
valid_address.append(
Checkpoints(
os.path.join(
os.path.join(opt.dir, 'models'),
address
),
str(lst[0+step]),
float(lst[2+step]),
float(lst[4+step]),
Checkpoints.str2epoch(lst[5+step])
)
)
try:
assert len(valid_address) != 0
except AssertionError as e:
print("{0}\nNo valid model found in {1} with name={2}."
.format(e, opt.dir, opt.name))
raise
return valid_address
|
0b1bcd662d0ca8ec62def67e8e3fe0fff7825de4
| 18,942 |
import os
def _read_filenames_in_dir(path, extension):
"""Returns the name of the Yaml files in a certain directory
Arguments
---------
path: str
Path to directory
extension: str
Extension of files (such as: '.yml' or '.csv')
Returns
-------
list
The list of files in `path` with extension
"""
files = []
for filename in os.listdir(path):
if filename.endswith(extension):
files.append(os.path.splitext(filename)[0])
return files
|
6f16cd0f39d7df93fbbf8ede7236e373f1d905df
| 18,943 |
import argparse
import os
def setup_cccc_tool_plugin(use_plugin_context=True, binary=None, cccc_config=None):
"""Create an instance of the CCCC plugin."""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--cccc-bin", dest="cccc_bin")
arg_parser.add_argument("--cccc-config", dest="cccc_config")
resources = Resources(
[os.path.join(os.path.dirname(statick_tool.__file__), "plugins")]
)
config = Config(resources.get_file("config.yaml"))
ctp = CCCCToolPlugin()
plugin_context = PluginContext(arg_parser.parse_args([]), resources, config)
plugin_context.args.output_directory = os.path.dirname(__file__)
if binary:
plugin_context.args.cccc_bin = binary
if cccc_config:
plugin_context.args.cccc_config = cccc_config
if use_plugin_context:
ctp.set_plugin_context(plugin_context)
return ctp
|
5b3b3e54f01967004a0ecfa938c1a7bec581ce11
| 18,944 |
def Precedence(op):
"""The numeric precedence of a binary operator."""
# Particularly convenient during layout of binary operators.
return float(sum(i * (op in grp[1:])
for i, grp in enumerate(precedence))) / len(precedence)
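# Hedged sketch of the module-level `precedence` table this relies on: groups
# ordered from loosest- to tightest-binding, each group's tail listing the
# operators at that level, e.g.
#
#   precedence = [('or', 'or'), ('and', 'and'), ('cmp', '<', '>', '=='),
#                 ('add', '+', '-'), ('mul', '*', '/')]
#
# so that Precedence('*') > Precedence('+') > Precedence('==').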
|
0071d2972474c57376c334401c43673f1c4bde49
| 18,945 |
from typing import Dict
from typing import Any
def _FeastToExampleTransform(
pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str
) -> beam.pvalue.PCollection:
"""Read from BigQuery and transform to TF examples.
Args:
pipeline: beam pipeline.
exec_properties: A dict of execution properties.
split_pattern: Split.pattern in Input config, a BigQuery sql string.
Returns:
PCollection of TF examples.
"""
# Load custom config dictionary
custom_config = _load_custom_config(exec_properties["custom_config"])
# Get Feast retrieval job
retrieval_job = _get_retrieval_job(
entity_query=split_pattern, custom_config=custom_config
)
# Setup datasource and converter.
if isinstance(retrieval_job, BigQueryRetrievalJob):
query = retrieval_job.to_sql()
# Internally Beam creates a temporary table and exports from the query.
datasource = utils.ReadFromBigQuery(query=query)
converter = converters._BigQueryConverter(
query, _get_gcp_project(exec_properties)
)
else:
raise NotImplementedError(
f"Support for {type(retrieval_job)} is not available yet. For now we only support BigQuery source."
)
# Setup converter from dictionary of str -> value to bytes
map_function = None
out_format = exec_properties.get(
"output_data_format", example_gen_pb2.FORMAT_TF_EXAMPLE
)
if out_format == example_gen_pb2.FORMAT_TF_EXAMPLE:
map_function = converter.RowToExampleBytes
elif out_format == example_gen_pb2.FORMAT_TF_SEQUENCE_EXAMPLE:
map_function = converter.RowToSequenceExampleBytes
else:
raise NotImplementedError(
f"Format {out_format} is not currently supported. Currently we only support tfexample"
)
# Setup pipeline
return (
pipeline
| "DataRetrieval" >> datasource
| f"To{out_format.capitalize()}Bytes" >> beam.Map(map_function)
)
|
76451a98b9e11188eda42b5141e73647b87df94b
| 18,946 |
import os
def fluxional_mode(atom_indices, span=360.0):
""" Writes the string for each fluxional mode
"""
# Format the aotm indices string
atom_indices = util.format_flux_mode_indices(atom_indices)
# Create dictionary to fill template
flux_mode_keys = {
'atom_indices': atom_indices,
'span': span,
}
# Set template name and path for a monte carlo species section
template_file_name = 'fluxional_mode.mako'
template_file_path = os.path.join(MONTE_CARLO_PATH, template_file_name)
# Build monte carlo section string
flux_str = Template(filename=template_file_path).render(**flux_mode_keys)
return flux_str
|
489f1f82e8eaf535ae446b41e2e7307ab97f7cd8
| 18,947 |
import re
import logging
def parse_host_info(qhost_tree, queues_tree, queues_to_ignore=[]):
"""
:return: dictionary key: host, value HostInfo
"""
dctRet = {}
for host_node in qhost_tree.findall('host'):
host_name = host_node.get('name')
dct_hostvalues = dict([(hostvalue_node.get('name'), hostvalue_node.text) for hostvalue_node in host_node.findall('hostvalue')])
if dct_hostvalues['num_proc'] != '-':
slots = int(dct_hostvalues['num_proc'])
slots_used = sum([int(slots_used_node.text) for slots_used_node in host_node.findall(".//queuevalue[@name='slots_used']")])
memory = dehumanize_memory(dct_hostvalues['mem_total'])
mem_used = 0 if dct_hostvalues['mem_used'] == '-' else dehumanize_memory(dct_hostvalues['mem_used'])
dctRet[host_name] = HostInfo(host=host_name, slots=slots, memory=memory, state=None, slots_used=slots_used,
mem_used=mem_used, queues=set())
else:
dctRet[host_name] = HostInfo(host=host_name, slots=None, memory=None, state=None, slots_used=None,
mem_used=None, queues=set())
for queue_info in queues_tree.findall('*/Queue-List'):
state = queue_info.findtext('state')
if state is None: state = ''
# Ignore suspended state
state = re.sub('s', '', state)
# Ignore configuration ambiguous state
state = re.sub('c', '', state)
# If disabled, ignore other state flags, because they can vary between queues on a host
if 'd' in state:
state = 'd'
queue = queue_info.findtext('name')
queue_split = queue.split('@', 1)
host = queue_split[1]
queue_name = queue_split[0]
if queue_name in queues_to_ignore:
continue
host_info = dctRet.get(host)
host_info.queues.add(queue_name)
if len(state) > 0:
if host_info is None:
logging.log_message(host + " found in qstat but not qhost")
elif host_info.state is None:
dctRet[host] = host_info._replace(state=state)
elif not is_host_state_compatible(host_info.state, state):
raise Exception("Conflicting states for %s: %s != %s" % (host, host_info.state, state))
return dctRet
|
a5f5154ac50d358b4a523872ffcaba3030d2f722
| 18,948 |
import click
def _get_param_type_from_str(
type_name: str = None,
param_doc: docstring_parser.DocstringParam = None,
) -> t.Tuple[_ParamArgs, t.Union[click.ParamType, None]]:
"""Guess parameter type from parameter type name."""
type_name = type_name or ""
desc = param_doc.description if param_doc else ""
if type_name == "int":
return _ParamArgs.single, int
elif type_name == "float":
return _ParamArgs.single, float
elif type_name == "bytes":
return _ParamArgs.single, bytes
elif type_name == "bool":
return _ParamArgs.flag, None
elif type_name[:4] == "list":
args, element = _get_param_type_from_str(type_name[5:-1], param_doc)
assert args is _ParamArgs.single
return _ParamArgs.multiple, element
elif type_name[:5] == "tuple":
els = (_get_param_type_from_str(n)[1] for n in type_name[6:-1].split(", "))
return _ParamArgs.single, click.Tuple(els)
elif type_name == "io.FileIO":
return _ParamArgs.single, _build_file_param_type(desc)
elif type_name == "pathlib.Path":
return _ParamArgs.single, _build_path_param_type(desc)
elif type_name == "datetime.datetime":
return _ParamArgs.single, click.DateTime()
elif type_name == "uuid.UUID":
return _ParamArgs.single, click.UUID
else:
logger.warning("Cannot guess parameter type from name: %s", type_name)
return _ParamArgs.single, None
|
a13621ffbed428fbc32f6285e2fc0a2b53097cad
| 18,949 |
def solve(task: str) -> int:
"""How many differently colored bags can contain shiny gold?"""
parents = process_data(task)
seen = set()
candidates = parents["shiny gold"]
while candidates:
candidate = candidates.pop()
if candidate not in seen:
seen.add(candidate)
candidates.extend(parents[candidate])
return len(seen)
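# Hedged sketch of the assumed process_data() output: a mapping (presumably a
# defaultdict(list), so missing colours yield []) from a bag colour to the
# colours that can directly contain it, e.g.
#
#   parents = {"shiny gold": ["bright white", "muted yellow"],
#              "bright white": ["light red"], "muted yellow": [], ...}
#
# The loop above is then a plain graph walk over all transitive containers.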
|
ea505c346a4482b9516ad22baa71d251b7e1dc41
| 18,950 |
import urllib
import base64
import hashlib
import requests
def cityDesc(codePostal):
"""
    Return codes:
    100 : everything is fine
    200 : the request did not succeed
    300 : no cinema in the city
    400 : the city does not exist
"""
headersUA = init_connect()
YMDstr = getDate()
searchField = codePostal
filterField = ''
countField = '500'
pageField = '1'
url = 'q=' + searchField + '&filter=' + filterField + '&count=' + countField + '&page=' + pageField + '&format=json&partner=' + allocine_partner + '&sed=' + YMDstr
toEncrypt = allocine_secret_key + url
sig = urllib.parse.quote_plus(base64.b64encode(hashlib.sha1(toEncrypt.encode('utf-8')).digest()))
urlComplete = 'http://api.allocine.fr/rest/v3/search?' + url + "&sig=" + sig
codeRetour = 200
listeCine = []
try:
req = requests.get(urlComplete, headers=headersUA)
except:
return listeCine, codeRetour
# print(req.json())
if req.status_code == 200:
codeRetour = 100
if 'location' in req.json()['feed']:
if 'theater' in req.json()['feed']:
for theaterCity in req.json()['feed']['theater']:
listeCine.append(theaterCity)
else:
codeRetour = 300
else:
codeRetour = 400
return listeCine, codeRetour
|
06da49d9afe5420869204a423a2db31df11cc58e
| 18,951 |
async def get_reposet(request: AthenianWebRequest, id: int) -> web.Response:
"""List a repository set.
:param id: Numeric identifier of the repository set to list.
:type id: repository set ID.
"""
rs_cols = [
RepositorySet.name,
RepositorySet.items,
RepositorySet.precomputed,
RepositorySet.tracking_re,
]
rs, _ = await fetch_reposet(id, rs_cols, request.uid, request.sdb, request.cache)
return model_response(RepositorySetWithName(
name=rs.name, items=rs.items, precomputed=rs.precomputed))
|
a3a2cf6cb1152aadb81798cfa3e1be214635edad
| 18,952 |
from xgboost_ray.elastic import _maybe_schedule_new_actors, \
    _update_scheduled_actor_states, _get_actor_alive_status
def _train(params: Dict,
dtrain: RayDMatrix,
model_factory: Type[LGBMModel],
boost_rounds_left: int,
*args,
evals=(),
ray_params: RayParams,
cpus_per_actor: int,
gpus_per_actor: int,
_training_state: _TrainingState,
machine_addresses: Optional[List[Tuple[str, str]]] = None,
listen_port: Optional[int] = None,
**kwargs) -> Tuple[LGBMModel, Dict, Dict]:
"""This is the local train function wrapped by :func:`train() <train>`.
    This function can be thought of as one invocation of a multi-actor lightgbm
training run. It starts the required number of actors, triggers data
loading, collects the results, and handles (i.e. registers) actor failures
- but it does not handle fault tolerance or general training setup.
Generally, this function is called one or multiple times by the
:func:`train() <train>` function. It is called exactly once if no
errors occur. It is called more than once if errors occurred (e.g. an
actor died) and failure handling is enabled.
"""
# Un-schedule possible scheduled restarts
_training_state.restart_training_at = None
params = deepcopy(params)
if "n_jobs" in params:
if params["n_jobs"] > cpus_per_actor:
raise ValueError(
"Specified number of threads greater than number of CPUs. "
"\nFIX THIS by passing a lower value for the `n_jobs` "
"parameter or a higher number for `cpus_per_actor`.")
else:
params["n_jobs"] = cpus_per_actor
_check_cpus_per_actor_at_least_2(
params["n_jobs"], getattr(ray_params, "allow_less_than_two_cpus",
False))
# This is a callback that handles actor failures.
# We identify the rank of the failed actor, add this to a set of
# failed actors (which we might want to restart later), and set its
# entry in the actor list to None.
def handle_actor_failure(actor_id):
rank = _training_state.actors.index(actor_id)
_training_state.failed_actor_ranks.add(rank)
_training_state.actors[rank] = None
# Here we create new actors. In the first invocation of _train(), this
# will be all actors. In future invocations, this may be less than
# the num_actors setting, depending on the failure mode.
newly_created = 0
for i in list(_training_state.failed_actor_ranks):
if _training_state.actors[i] is not None:
raise RuntimeError(
f"Trying to create actor with rank {i}, but it already "
f"exists.")
ip = None
port = None
if machine_addresses:
ip = machine_addresses[i][0]
port = machine_addresses[i][1]
elif listen_port:
port = listen_port
actor = _create_actor(
rank=i,
num_actors=ray_params.num_actors,
model_factory=model_factory,
num_cpus_per_actor=cpus_per_actor,
num_gpus_per_actor=gpus_per_actor,
resources_per_actor=ray_params.resources_per_actor,
placement_group=_training_state.placement_group,
queue=_training_state.queue,
checkpoint_frequency=ray_params.checkpoint_frequency,
distributed_callbacks=ray_params.distributed_callbacks,
ip=ip,
port=port)
# Set actor entry in our list
_training_state.actors[i] = actor
# Remove from this set so it is not created again
_training_state.failed_actor_ranks.remove(i)
newly_created += 1
alive_actors = sum(1 for a in _training_state.actors if a is not None)
logger.info(f"[RayLightGBM] Created {newly_created} new actors "
f"({alive_actors} total actors). Waiting until actors "
f"are ready for training.")
# For distributed datasets (e.g. Modin), this will initialize
# (and fix) the assignment of data shards to actor ranks
dtrain.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
dtrain.assign_shards_to_actors(_training_state.actors)
for deval, _ in evals:
deval.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
deval.assign_shards_to_actors(_training_state.actors)
load_data = [dtrain] + [eval[0] for eval in evals]
prepare_actor_tasks = [
_PrepareActorTask(
actor,
# Maybe we got a new Queue actor, so send it to all actors.
queue=_training_state.queue,
# Maybe we got a new Event actor, so send it to all actors.
stop_event=_training_state.stop_event,
# Trigger data loading
load_data=load_data) for actor in _training_state.actors
if actor is not None
]
start_wait = time.time()
last_status = start_wait
try:
# Construct list before calling any() to force evaluation
ready_states = [task.is_ready() for task in prepare_actor_tasks]
while not all(ready_states):
if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
wait_time = time.time() - start_wait
logger.info(f"Waiting until actors are ready "
f"({wait_time:.0f} seconds passed).")
last_status = time.time()
time.sleep(0.1)
ready_states = [task.is_ready() for task in prepare_actor_tasks]
except Exception as exc:
_training_state.stop_event.set()
_get_actor_alive_status(_training_state.actors, handle_actor_failure)
raise RayActorError from exc
logger.info("[RayLightGBM] Starting LightGBM training.")
# # Start Rabit tracker for gradient sharing
# rabit_process, env = _start_rabit_tracker(alive_actors)
# rabit_args = [("%s=%s" % item).encode() for item in env.items()]
# Load checkpoint if we have one. In that case we need to adjust the
# number of training rounds.
if _training_state.checkpoint.value:
booster = Booster(
model_str=pickle.loads(_training_state.checkpoint.value))
kwargs["init_model"] = booster
if _training_state.checkpoint.iteration == -1:
# -1 means training already finished.
logger.error(
"Trying to load continue from checkpoint, but the checkpoint"
"indicates training already finished. Returning last"
"checkpointed model instead.")
return kwargs["init_model"], {}, _training_state.additional_results
# The callback_returns dict contains actor-rank indexed lists of
# results obtained through the `put_queue` function, usually
# sent via callbacks.
callback_returns = _training_state.additional_results.get(
"callback_returns")
if callback_returns is None:
callback_returns = [list() for _ in range(len(_training_state.actors))]
_training_state.additional_results[
"callback_returns"] = callback_returns
_training_state.training_started_at = time.time()
# Trigger the train function
live_actors = [
actor for actor in _training_state.actors if actor is not None
]
# LightGBM specific: handle actor addresses
# if neither local_listening_port nor machines are set
# get the ips and a random port from the actors, and then
# assign them back so the lgbm params are updated.
# do this in a loop to ensure that if there is a port
    # conflict, it can try to choose a new one. Most of the time
    # it will complete in one iteration.
machines = None
    for attempt in range(5):
addresses = ray.get(
[actor.find_free_address.remote() for actor in live_actors])
if addresses:
_, ports = zip(*addresses)
ports = list(ports)
machine_addresses_new = [f"{ip}:{port}" for ip, port in addresses]
if len(machine_addresses_new) == len(set(machine_addresses_new)):
machines = ",".join(machine_addresses_new)
break
if machine_addresses:
raise ValueError(
"Machine addresses contains non-unique entries.")
else:
logger.debug("Couldn't obtain unique addresses, trying again.")
if machines:
logger.debug(f"Obtained unique addresses in {i} attempts.")
else:
raise ValueError(
f"Couldn't obtain enough unique addresses for {len(live_actors)}."
" Try reducing the number of actors.")
for i, actor in enumerate(live_actors):
actor.set_network_params.remote(machines, ports[i], len(live_actors),
params.get("time_out", 120))
training_futures = [
actor.train.remote(
i == 0, # return_bst
params,
dtrain,
evals,
boost_rounds_left,
*args,
**kwargs) for i, actor in enumerate(live_actors)
]
# Failure handling loop. Here we wait until all training tasks finished.
# If a training task fails, we stop training on the remaining actors,
# check which ones are still alive, and raise the error.
# The train() wrapper function will then handle the error.
start_wait = time.time()
last_status = start_wait
try:
not_ready = training_futures
while not_ready:
if _training_state.queue:
_handle_queue(
queue=_training_state.queue,
checkpoint=_training_state.checkpoint,
callback_returns=callback_returns)
if ray_params.elastic_training \
and not ELASTIC_RESTART_DISABLED:
_maybe_schedule_new_actors(
training_state=_training_state,
num_cpus_per_actor=cpus_per_actor,
num_gpus_per_actor=gpus_per_actor,
resources_per_actor=ray_params.resources_per_actor,
ray_params=ray_params,
load_data=load_data)
# This may raise RayXGBoostActorAvailable
_update_scheduled_actor_states(_training_state)
if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
wait_time = time.time() - start_wait
logger.info(f"Training in progress "
f"({wait_time:.0f} seconds since last restart).")
last_status = time.time()
ready, not_ready = ray.wait(
not_ready, num_returns=len(not_ready), timeout=1)
ray.get(ready)
# Get items from queue one last time
if _training_state.queue:
_handle_queue(
queue=_training_state.queue,
checkpoint=_training_state.checkpoint,
callback_returns=callback_returns)
# The inner loop should catch all exceptions
except Exception as exc:
logger.debug(f"Caught exception in training loop: {exc}")
# Stop all other actors from training
_training_state.stop_event.set()
# Check which actors are still alive
_get_actor_alive_status(_training_state.actors, handle_actor_failure)
raise RayActorError from exc
# Training is now complete.
# # Stop Rabit tracking process
# _stop_rabit_tracker(rabit_process)
# Get all results from all actors.
all_results: List[Dict[str, Any]] = ray.get(training_futures)
# All results should be the same. But only
# the first one actually returns its bst object.
bst: LGBMModel = all_results[0]["bst"]
evals_result = all_results[0]["evals_result"]
if not listen_port:
for param in _ConfigAliases.get("local_listen_port"):
bst._other_params.pop(param, None)
if not machine_addresses:
for param in _ConfigAliases.get("machines"):
bst._other_params.pop(param, None)
for param in _ConfigAliases.get("num_machines", "time_out"):
bst._other_params.pop(param, None)
if callback_returns:
_training_state.additional_results[
"callback_returns"] = callback_returns
total_n = sum(res["train_n"] or 0 for res in all_results)
_training_state.additional_results["total_n"] = total_n
return bst, evals_result, _training_state.additional_results
|
f52508ffb67e4e29dde7300099e616973c7d6740
| 18,953 |
def is_called_at_module_level() -> bool:
"""
Check if the current function is being called at the module level.
Raise `RuntimeError` if `is_called_at_module_level()` is not called in a function.
"""
if not (frame := getcallerframe().f_back):
raise RuntimeError(
"is_called_at_module_level() expects to be called in a function"
)
# There is currently no reliable and officially-provided way to determine whether a
# function is called from the module level or not.
#
# Therefore we use a try-best-effort heuristic approach here.
#
# This check could emit false positive in the case of some advanced dynamic-reflection
# inspection tricks, like `func.__code__ = func.__code__.replace(co_name="<module>")`.
#
# However such case is so unlikely and rare that we should not be concerned.
#
# We are good with the current approach as it works for most cases.
return frame.f_code.co_name == "<module>"
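# Hedged usage sketch (getcallerframe is assumed to wrap sys._getframe/inspect):
#
#   def register():
#       if is_called_at_module_level():
#           print("registered at import time")
#
#   register()                 # called at module scope -> True
#   def wrapper(): register()
#   wrapper()                  # called from inside a function -> False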
|
0c807205472021b20c7b7bad27c8b5f7a634dd85
| 18,954 |
def extract_dominant_keypoints2D(keypoint_2D, dominant_hand):
""" Extract keypoint 2D.
# Look Later with Octavio
# Arguments
keypoint_2D: Numpy array of shape (num_keypoints, 1).
dominant_hand: List of size (2) with booleans.
# Returns
keypoint_visibility_2D_21: Numpy array of shape (num_keypoints, 1).
"""
keypoint_visibility_left = keypoint_2D[:LEFT_PINKY_TIP, :]
keypoint_visibility_right = keypoint_2D[RIGHT_WRIST:RIGHT_PINKY_TIP, :]
keypoint_visibility_2D_21 = np.where(
dominant_hand[:, :2], keypoint_visibility_left,
keypoint_visibility_right)
return keypoint_visibility_2D_21
|
2581b4cb68d6dad2da3933259582f9160224eef9
| 18,955 |
def translation_ev(h, t, tol=1e6):
"""Compute the eigenvalues of the translation operator of a lead.
Adapted from kwant.physics.leads.modes.
Parameters
----------
h : numpy array, real or complex, shape (N, N) The unit cell
Hamiltonian of the lead unit cell.
t : numpy array, real or complex, shape (N, M)
The hopping matrix from a lead cell to the one on which self-energy
has to be calculated (and any other hopping in the same direction).
tol : float
Numbers and differences are considered zero when they are smaller
than `tol` times the machine precision.
Returns
-------
ev : numpy array
Eigenvalues of the translation operator in the form lambda=r*exp(i*k),
for |r|=1 they are propagating modes.
"""
a, b = kwant.physics.leads.setup_linsys(h, t, tol, None).eigenproblem
ev = kwant.physics.leads.unified_eigenproblem(a, b, tol=tol)[0]
return ev
|
b5534b782b487ca195ab9b78438e68e81a62e74a
| 18,956 |
def bsearch(n, pred):
"""
Given a boolean function pred that takes index arguments in [0, n).
Assume the boolean function pred returns all False and then all True for
values. Return the index of the first True, or n if that does not exist.
"""
# invariant: last False lies in [l, r) and pred(l) is False
if pred(0):
return 0
l = 0
r = n
while r-l > 1:
m = l + (r-l)//2
result = pred(m)
if result:
r = m
else:
l = m
return l+1
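# A few hedged sanity checks, appended here only to illustrate the contract.
assert bsearch(10, lambda i: i * i >= 20) == 5  # first index whose square reaches 20
assert bsearch(4, lambda i: False) == 4         # no True value -> returns n
assert bsearch(4, lambda i: True) == 0          # pred(0) already True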
|
9274732aa9e24a0d0f73399ff50c19a544ac06a7
| 18,957 |
def test_rotating_file_handler_interval(tmpdir, logger, monkeypatch):
"""Test the rotating file handler when the rollover return a time smaller
than the current time.
"""
def rollover(obj, current_time):
return current_time - 0.1
monkeypatch.setattr(DayRotatingTimeHandler, 'computeRollover', rollover)
handler = DayRotatingTimeHandler(str(tmpdir.join('test.log')))
handler.interval = 0.2
logger.addHandler(handler)
# Probably because we gives a negative time.
assert len(tmpdir.listdir()) == 1
logger.info('test')
sleep(1)
logger.info('test')
assert len(tmpdir.listdir()) == 3
|
f6394f215e452fd7875b1fc624db9cb50cc19ed8
| 18,958 |
def compute_zero_crossing_wavelength(period, water_depth, gravity=GRAVITY):
"""Computes zero-crossing wavelength from given period.
This uses the dispersion relation for linear waves.
"""
return wavenumber_to_wavelength(
frequency_to_wavenumber(1. / period, water_depth, gravity)
)
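# Hedged note on the physics: frequency_to_wavenumber is assumed to solve the
# linear dispersion relation
#     (2 * pi / period) ** 2 = gravity * k * tanh(k * water_depth)
# for the wavenumber k, and wavenumber_to_wavelength then returns 2 * pi / k.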
|
575ff3d251575fa7a232c120bde15f8f57c1dac9
| 18,959 |
def launch_transport_listener(transport, bindaddr, role, remote_addrport, pt_config, ext_or_cookie_file=None):
"""
Launch a listener for 'transport' in role 'role' (socks/client/server/ext_server).
If 'bindaddr' is set, then listen on bindaddr. Otherwise, listen
on an ephemeral port on localhost.
'remote_addrport' is the TCP/IP address of the other end of the
circuit. It's not used if we are in 'socks' role.
'pt_config' contains configuration options (such as the state location)
which are of interest to the pluggable transport.
'ext_or_cookie_file' is the filesystem path where the Extended
ORPort Authentication cookie is stored. It's only used in
'ext_server' mode.
Return a tuple (addr, port) representing where we managed to bind.
Throws obfsproxy.transports.transports.TransportNotFound if the
transport could not be found.
Throws twisted.internet.error.CannotListenError if the listener
could not be set up.
"""
listen_host = bindaddr[0] if bindaddr else 'localhost'
listen_port = int(bindaddr[1]) if bindaddr else 0
if role == 'socks':
transport_class = FTETransportClient
if hasattr(socks, "OBFSSOCKSv5Factory"):
# obfsproxy >= 0.2.7 provides SOCKS5.
factory = socks.OBFSSOCKSv5Factory(transport_class, pt_config)
pt_config.fte_client_socks_version = 5
elif hasattr(socks, "SOCKSv4Factory"):
# obfsproxy < 0.2.7 provides SOCKS4.
factory = socks.SOCKSv4Factory(transport_class, pt_config)
pt_config.fte_client_socks_version = 4
else:
# This will only happen if the obfsproxy people change the socks
# code again. This really is a dependency issue, so raise an
# ImportError.
raise ImportError("Failed to setup an obfsproxy SOCKS server factory")
elif role == 'ext_server':
assert(remote_addrport and ext_or_cookie_file)
transport_class = FTETransportServer
factory = extended_orport.ExtORPortServerFactory(
remote_addrport, ext_or_cookie_file, transport, transport_class, pt_config)
elif role == 'client':
assert(remote_addrport)
transport_class = FTETransportClient
factory = network.StaticDestinationServerFactory(
remote_addrport, role, transport_class, pt_config)
elif role == 'server':
assert(remote_addrport)
transport_class = FTETransportServer
factory = network.StaticDestinationServerFactory(
remote_addrport, role, transport_class, pt_config)
else:
raise InvalidRoleException()
addrport = twisted.internet.reactor.listenTCP(
listen_port, factory, interface=listen_host)
return (addrport.getHost().host, addrport.getHost().port)
|
98ba849b3adc58b14b8983d0e61a409bc5ce3af7
| 18,960 |
import os
def div(style, render=False, label=''):
"""Render divider."""
if len(style) == 1:
if label == '':
res = hfill('', style)
else:
res = hfill(style * 2 + ' ' + label + ' ', style)
elif style[0] == style[-1]:
# Windows does line wrapping weird
sp_left = '\n' * (len(style) - (2 if os.name == 'nt' else 1))
sp_right = '\n' * (len(style) - 1)
res = (
hfill('', style[0]) + sp_left + center(label)
+ sp_right + hfill('', style[0]))
else:
raise ValueError("""Style not recognized. Available styles:
> '-', '=', or any other single character sequence
> '- -', '= =', or any other repeated character with n>=0 separating spaces
""")
if render:
return res
else:
print(res)
|
59a1801f749671e9c68613457cd934ca458c7eb6
| 18,961 |
import zmq
import numpy as np
# NOTE: `socket` below is assumed to be a module-level zmq socket object (not the
# stdlib socket module), and SENDING_DATA a string constant naming the message type.
def send_array(A, flags=0, copy=True, track=False):
"""send a numpy array with metadata
Inputs
------
A: (subplots,dim) np array to transmit
subplots - the amount of subplots that are
defined in the current plot
dim - the amount of data that you want to plot.
This is not fixed
"""
#If you get a float value, convert it to a numpy array
if(isinstance(A,float)):
A = np.array(A).reshape(1,1)
#If array is one dimensional, reshape to two dimensions
if(len(A.shape) ==1):
A = A.reshape(-1,1)
#Create dict to reconstruct array
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
#Send category
socket.send_string(SENDING_DATA)
#Send json description
socket.send_json(md, flags|zmq.SNDMORE)
#Send array
return socket.send(A, flags, copy=copy, track=track)
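# Hedged sketch of the matching receiver side (function and variable names are
# assumptions; zmq recv_string/recv_json/recv and np.frombuffer are standard):
#
#   def recv_array(sock, flags=0):
#       category = sock.recv_string()              # should equal SENDING_DATA
#       md = sock.recv_json(flags=flags)
#       buf = sock.recv(flags=flags)
#       return np.frombuffer(buf, dtype=md['dtype']).reshape(md['shape'])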
|
c53a17918c12e3ccec9046aad7fc7fc2f498a8ea
| 18,962 |
import json
def api_file_upload(request):
""" Upload a file to the storage system """
try:
fobj = request.FILES["file"]
checksum, ext = fobj._name.split(".")
try:
request.user.check_staged_space(fobj._size, checksum)
except Exception as e:
return HttpResponseForbidden(str(e))
write_file_to_storage(fobj, check_valid=True)
StagedFile.objects.get_or_create(
checksum=checksum,
file_size=fobj._size,
uploaded_by=request.user
)
return HttpResponse(json.dumps({
"success": True,
}))
except KeyError:
return HttpResponseBadRequest("Invalid file upload request")
except Exception as e:
handle_server_error(request)
return HttpResponseServerError(content=str(e), reason=str(e))
|
80b15b4d92b5ba2f3a247f1baf7900e73b18781f
| 18,963 |
import logging
def calculateAggregateInterferenceForGwpz(gwpz_record, grants):
"""Calculates per-channel aggregate interference for GWPZ.
Args:
gwpz_record: A GWPZ record dict.
grants: An iterable of CBSD grants of type |data.CbsdGrantInfo|.
Returns:
Aggregate interference to GWPZ in the nested dictionary format.
{latitude : {longitude: [aggr_interf1(mW), ..., aggr_interfK(mW)]}}
The list contains the value per protected channel.
"""
gwpz_region = gwpz_record['landCategory']
# Get Fine Grid Points for a GWPZ protection area
protection_points = utils.GridPolygon(gwpz_record['zone']['features'][0]['geometry'],
GWPZ_GRID_RES_ARCSEC)
gwpz_freq_range = gwpz_record['record']['deploymentParam'][0]\
['operationParam']['operationFrequencyRange']
gwpz_low_freq = gwpz_freq_range['lowFrequency']
gwpz_high_freq = gwpz_freq_range['highFrequency']
# Get channels over which area incumbent needs partial/full protection
protection_channels = interf.getProtectedChannels(gwpz_low_freq, gwpz_high_freq)
# Calculate aggregate interference from each protection constraint with a
# pool of parallel processes.
logging.info('Computing aggregateInterferenceForPoint for PPA (%s), channels (%s), '
'nPoints (%d), grants (%s), region_type (%s)',
gwpz_record, protection_channels, len(protection_points), grants, gwpz_region)
logging.debug(' points: %s', protection_points)
interfCalculator = partial(aggregateInterferenceForPoint,
channels=protection_channels,
grants=grants,
fss_info=None,
esc_antenna_info=None,
protection_ent_type=data.ProtectedEntityType.GWPZ_AREA,
region_type=gwpz_region)
pool = mpool.Pool()
interferences = pool.map(interfCalculator, protection_points)
return InterferenceDict(interferences)
|
d2b7d1b1270e4a6da302e4d13a261a29d51b0a12
| 18,964 |
import pandas
def from_pandas_ephemeral(
engine: Engine,
df: pandas.DataFrame,
convert_objects: bool,
name: str
) -> DataFrame:
"""
Instantiate a new DataFrame based on the content of a Pandas DataFrame. The data will be represented
using a `select * from values()` query, or something similar depending on the database dialect.
Warning: This method is only suited for small quantities of data.
For anything over a dozen kilobytes of data it is recommended to store the data in a table in
the database, e.g. by using the from_pd_store_table() function.
Supported dtypes are 'int64', 'float64', 'string', 'datetime64[ns]', 'bool'
:param engine: db connection
:param df: Pandas DataFrame to instantiate as DataFrame
:param convert_objects: If True, columns of type 'object' are converted to 'string' using the
pd.convert_dtypes() method where possible.
"""
# todo add dtypes argument that explicitly let's you set the supported dtypes for pandas columns
df_copy, index_dtypes, all_dtypes = _from_pd_shared(
dialect=engine.dialect,
df=df,
convert_objects=convert_objects,
cte=True
)
column_series_type = [get_series_type_from_dtype(dtype) for dtype in all_dtypes.values()]
per_row_expr = []
for row in df_copy.itertuples():
per_column_expr = []
# Access the columns in `row` by index rather than by name. Because if a name starts with an
# underscore (e.g. _index_skating_order) it will not be available as attribute.
# so we use `row[i]` instead of getattr(row, column_name).
# start=1 is to account for the automatic index that pandas adds
for i, series_type in enumerate(column_series_type, start=1):
val = row[i]
per_column_expr.append(
series_type.value_to_expression(dialect=engine.dialect, value=val, dtype=series_type.dtype)
)
row_expr = Expression.construct('({})', join_expressions(per_column_expr))
per_row_expr.append(row_expr)
all_values_str = join_expressions(per_row_expr, join_str=',\n').to_sql(engine.dialect)
if is_postgres(engine):
# We are building sql of the form:
# select * from (values
# ('row 1', cast(1234 as bigint), cast(-13.37 as double precision)),
# ('row 2', cast(1337 as bigint), cast(31.337 as double precision))
# ) as t("a", "b", "c")
column_names_expr = join_expressions(
[Expression.raw(quote_identifier(engine.dialect, column_name)) for column_name in
all_dtypes.keys()]
)
column_names_str = column_names_expr.to_sql(engine.dialect)
sql = f'select * from (values \n{all_values_str}\n) as t({column_names_str})\n'
elif is_bigquery(engine):
# We are building sql of the form:
# select * from UNNEST([
# STRUCT<`a` STRING, `b` INT64, `c` FLOAT64>
# ('row 1', 1234, cast(-13.37 as FLOAT64))
# ('row 2', 1337, cast(31.337 as FLOAT64))
# ])
sql_column_name_types = []
for col_name, dtype in all_dtypes.items():
db_col_name = quote_identifier(dialect=engine.dialect, name=col_name)
db_dtype = get_series_type_from_dtype(dtype).get_db_dtype(dialect=engine.dialect)
sql_column_name_types.append(f'{db_col_name} {db_dtype}')
sql_struct = f'STRUCT<{", ".join(sql_column_name_types)}>'
sql = f'select * from UNNEST([{sql_struct} \n{all_values_str}\n])\n'
else:
raise DatabaseNotSupportedException(engine)
model_builder = CustomSqlModelBuilder(sql=sql, name=name)
sql_model = model_builder()
index = list(index_dtypes.keys())
return DataFrame.from_model(engine=engine, model=sql_model, index=index, all_dtypes=all_dtypes)
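# Hedged usage sketch (engine URL and column values are assumptions):
#
#   engine = sqlalchemy.create_engine("postgresql://user:pass@host/db")
#   pdf = pandas.DataFrame({"a": [1, 2], "b": ["x", "y"]})
#   bach_df = from_pandas_ephemeral(engine, pdf, convert_objects=True, name="tiny")
#   # both rows travel inside the generated `select * from (values ...)` query,
#   # which is why this path is only meant for small amounts of data.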
|
afa06bd4e89a53aa0d420ab401137c10e3b46d89
| 18,965 |
def set_vars(api, file:str, tess_profile:dict):
"""
Reads the user-specific variables from the tess_profile
:param api:
:param file:
:param tess_profile:
:return:
"""
# Set necessary information
api.SetImageFile(file)
# Set Variable
api.SetVariable("save_blob_choices", "T")
if 'variables' in tess_profile:
for var in tess_profile['variables']:
api.SetVariable(var, str(tess_profile['variables'][var]['value']))
api.Recognize()
return 0
|
a7fbe0c5bc584928623e2eadc36240ea3b0f37de
| 18,966 |
def qtrfit(numpoints, defcoords, refcoords, nrot):
"""Find the quaternion, q, [and left rotation matrix, u] that minimizes
| qTXq - Y | ^ 2 [|uX - Y| ^ 2]
This is equivalent to maximizing Re (qTXTqY)
The left rotation matrix, u, is obtained from q by
u = qT1q
Parameters
numpoints: The number of points in each list (int)
defcoords: List of definition coordinates, with each set a list of form [x,y,z] (list)
refcoords: List of fitted coordinates, with each set a list of form [x,y,z] (list)
nrot: The maximum number of jacobi sweeps
Returns
quat: The best-fit quaternion
lrot: The best-fit left rotation matrix
"""
xxyx = 0.0
xxyy = 0.0
xxyz = 0.0
xyyx = 0.0
xyyy = 0.0
xyyz = 0.0
xzyx = 0.0
xzyy = 0.0
xzyz = 0.0
quat = []
cmat = []
for i in range(numpoints):
xxyx = xxyx + defcoords[i][0] * refcoords[i][0]
xxyy = xxyy + defcoords[i][0] * refcoords[i][1]
xxyz = xxyz + defcoords[i][0] * refcoords[i][2]
xyyx = xyyx + defcoords[i][1] * refcoords[i][0]
xyyy = xyyy + defcoords[i][1] * refcoords[i][1]
xyyz = xyyz + defcoords[i][1] * refcoords[i][2]
xzyx = xzyx + defcoords[i][2] * refcoords[i][0]
xzyy = xzyy + defcoords[i][2] * refcoords[i][1]
xzyz = xzyz + defcoords[i][2] * refcoords[i][2]
for i in range(4):
cmat.append([])
for _ in range(4):
cmat[i].append(0.0)
cmat[0][0] = xxyx + xyyy + xzyz
cmat[0][1] = xzyy - xyyz
cmat[0][2] = xxyz - xzyx
cmat[0][3] = xyyx - xxyy
cmat[1][1] = xxyx - xyyy - xzyz
cmat[1][2] = xxyy + xyyx
cmat[1][3] = xzyx + xxyz
cmat[2][2] = xyyy - xzyz - xxyx
cmat[2][3] = xyyz + xzyy
cmat[3][3] = xzyz - xxyx - xyyy
_, vmat = jacobi(cmat, nrot) # diagonalize c
for i in range(4):
quat.append(vmat[i][3])
lrot = q2mat(quat)
return quat, lrot
|
fdfde9deaf0b220bd468031264b029125c071fab
| 18,967 |
import struct
def _embedded_bundles_partial_impl(
ctx,
bundle_embedded_bundles,
embeddable_targets,
frameworks,
plugins,
watch_bundles):
"""Implementation for the embedded bundles processing partial."""
_ignore = [ctx]
embeddable_providers = [
x[_AppleEmbeddableInfo]
for x in embeddable_targets
if _AppleEmbeddableInfo in x
]
transitive_frameworks = []
transitive_plugins = []
transitive_watch_bundles = []
for provider in embeddable_providers:
transitive_frameworks.append(provider.frameworks)
transitive_plugins.append(provider.plugins)
transitive_watch_bundles.append(provider.watch_bundles)
bundle_zips = []
if bundle_embedded_bundles:
bundle_zips.extend([
(processor.location.framework, None, depset(transitive = transitive_frameworks)),
(processor.location.plugin, None, depset(transitive = transitive_plugins)),
(processor.location.watch, None, depset(transitive = transitive_watch_bundles)),
])
# Clear the transitive lists to avoid propagating them, since they will be packaged in the
# bundle processing this partial and do not need to be propagated.
transitive_frameworks = []
transitive_plugins = []
transitive_watch_bundles = []
return struct(
bundle_zips = bundle_zips,
providers = [
_AppleEmbeddableInfo(
frameworks = depset(frameworks, transitive = transitive_frameworks),
plugins = depset(plugins, transitive = transitive_plugins),
watch_bundles = depset(watch_bundles, transitive = transitive_watch_bundles),
),
],
)
|
fe057887528a922ae89fe4c6d8066590d006e415
| 18,968 |
import yaml
def load_instrument(yml):
"""
Instantiate an instrument from YAML spec.
Parameters
----------
yml : str
filename for the instrument configuration in YAML format.
Returns
-------
hexrd.instrument.HEDMInstrument
Instrument instance.
"""
with open(yml, 'r') as f:
icfg = yaml.safe_load(f)
return instrument.HEDMInstrument(instrument_config=icfg)
|
a882b3b36def975b40124078c907a0f050b67a6f
| 18,969 |
from typing import List
def serialize_model(self: models.Model, excludes: List[str] = None) -> dict:
"""
    Serialize a model instance using the results of the select_related and
    prefetch_related joins already attached to it; use only()/defer() at query
    time to control which fields get serialized.
    It never queries the database on its own: it only reads what you already
    fetched, which avoids the N+1 query problem.
# See:
https://aber.sh/articles/A-new-idea-of-serializing-Django-model/
"""
excludes = excludes or []
serialized = set()
if getattr(settings, "DSA_SERIALIZE_TO_CAMELCASE", False):
to_camel_case_func = string_convert
else:
to_camel_case_func = do_nothing
def _serialize_model(model) -> dict:
        # One-to-one fields can make serialization recurse forever, so a free
        # variable of this closure records models already serialized; on a second
        # visit only model.pk is returned instead of recursing again.
nonlocal serialized
if model in serialized:
return model.pk
else:
serialized.add(model)
        # If a one-to-one / one-to-many field holds None, return an empty {} directly; otherwise an error would be raised.
if model is None:
return {}
result = {
to_camel_case_func(name): _serialize_model(foreign_key)
for name, foreign_key in model.__dict__["_state"]
.__dict__.get("fields_cache", {})
.items()
}
buried_fields = getattr(model, "buried_fields", [])
for name, value in model.__dict__.items():
            # Sensitive fields are not serialized
if name in buried_fields:
continue
            # Private attributes are not serialized
if name.startswith("_"):
continue
result[to_camel_case_func(name)] = value
for name, queryset in model.__dict__.get(
"_prefetched_objects_cache", {}
).items():
result[to_camel_case_func(name)] = [_serialize_model(model) for model in queryset] # type: ignore
return result
results = _serialize_model(self)
    # Strip excluded fields
for field_name in excludes:
del results[to_camel_case_func(field_name)]
return results
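# Hedged usage sketch (model and field names are assumptions):
#
#   article = (Article.objects.select_related("author")
#              .prefetch_related("tags").get(pk=1))
#   data = serialize_model(article, excludes=["internal_note"])
#   # the nested author dict and the list of tag dicts come solely from the
#   # caches filled by select_related/prefetch_related; no extra queries run.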
|
2c9a95b4d0e671492eefc73800d43196b6daa604
| 18,970 |
def isint(x):
"""
For an ``mpf`` *x*, or any type that can be converted
to ``mpf``, determines whether *x* is exactly
integer-valued::
>>> from sympy.mpmath import *
>>> isint(3), isint(mpf(3)), isint(3.2)
(True, True, False)
"""
if isinstance(x, int_types):
return True
try:
x = mpmathify(x)
except:
return False
if isinstance(x, mpf):
if isnan(x) or isinf(x):
return False
return x == int(x)
return False
|
ec4516fa450cfc0e58c9a69d95f0f9b8aff2443c
| 18,971 |
def update_header(file):
"""
Create a standard WCS header from the HDF5 header. To do this we clean up the
header data (which is initially stored in individual arrays). We then create
a new header dictionary with the old cleaned header info. Finally, we use
astropy.wcs.WCS to create an updated WCS header for the 2 spatial dimensions.
This is then saved to self.header while the header dictionary is saved
as self.hdr_dict.
Args:
file: hdf5 File object containing HDF5 file
"""
hdr_dict = {}
header_cols = [str(val[0]).replace("'b", '').replace("'", "").replace("b", '') for val in
list(file['header'][()])]
header_vals = [str(val[1]).replace("'b", '').replace("'", "").replace("b", '') for val in
list(file['header'][()])]
header_types = [val[3] for val in list(file['header'][()])]
for header_col, header_val, header_type in zip(header_cols, header_vals, header_types):
if 'bool' in str(header_type):
hdr_dict[header_col] = bool(header_val)
if 'float' in str(header_type):
hdr_dict[header_col] = float(header_val)
if 'int' in str(header_type):
hdr_dict[header_col] = int(header_val)
else:
try:
hdr_dict[header_col] = float(header_val)
except:
hdr_dict[header_col] = str(header_val)
hdr_dict['CTYPE3'] = 'WAVE-SIP'
hdr_dict['CUNIT3'] = 'm'
# hdr_dict['NAXIS1'] = 2064
# hdr_dict['NAXIS2'] = 2048
# Make WCS
wcs_data = WCS(hdr_dict, naxis=2)
header = wcs_data.to_header()
header.insert('WCSAXES', ('SIMPLE', 'T'))
header.insert('SIMPLE', ('NAXIS', 2), after=True)
# self.header.insert('NAXIS', ('NAXIS1', 2064), after=True)
# self.header.insert('NAXIS1', ('NAXIS2', 2048), after=True)
hdr_dict = hdr_dict
return header, hdr_dict
|
1c46139a747acdf69ea6602ea123d20f540de30b
| 18,972 |
def get_MD_psat():
""" MD data for saturation densities:
Thermodynamic properties of the 3D Lennard-Jones/spline model
Bjørn Hafskjold and Karl Patrick Travis and Amanda Bailey Hass and
Morten Hammer and Ailo Aasen and Øivind Wilhelmsen
doi: 10.1080/00268976.2019.1664780
"""
T = np.array([0.5501, 0.5499, 0.5496, 0.5997, 0.6500, 0.7000, 0.7504,
0.8000, 0.8202, 0.8407, 0.8596, 0.8688, 0.8771, 0.8775,
0.6898, 0.7723, 0.8070, 0.8407, 0.8437, 0.8570, 0.8687,
0.8723, 0.8762, 0.8770])
p = np.array([0.002158, 0.002084, 0.002123, 0.004656, 0.008804, 0.015332,
0.025052, 0.038927, 0.045588, 0.054326, 0.063949, 0.069529,
0.075501, 0.075752, 0.014112, 0.031532, 0.042154, 0.055300,
0.056660, 0.062675, 0.070558, 0.070944, 0.072616, 0.073748])
data = {}
data["T"] = T
data["P"] = P
return data
|
363107962628ca9796397977f4f41e5b30bcfbc0
| 18,973 |
import logging
def get_reddit_client():
"""Utility to get a Reddit Client"""
reddit_username = redditUsername
reddit_password = redditPassword
reddit_user_agent = redditUserAgent
reddit_client_secret = redditClientSecret
reddit_client_id = redditClientID
logging.info("Logged in as user (%s).." % reddit_username)
reddit_client = praw.Reddit(client_id=reddit_client_id,
client_secret=reddit_client_secret,
password=reddit_password,
user_agent=reddit_user_agent,
username=reddit_username)
return reddit_client
|
fe3a783a3ceb27954c658bf9dc036a067ab103a8
| 18,974 |
import six
def get_member_id():
"""
Retrieve member if for the current process.
:rtype: ``bytes``
"""
proc_info = system_info.get_process_info()
member_id = six.b('%s_%d' % (proc_info['hostname'], proc_info['pid']))
return member_id
|
1d1cc24ffa62cc8982a23ea986bdf3cbecca53ac
| 18,975 |
def get_table_arn():
"""A method to get the DynamoDB table ARN string.
Returns
-------
dict
A dictionary with AWS ARN string for the table ARN.
"""
resp = dynamodb_client.describe_table(
TableName=table_name
)
return {
"table_arn": resp['Table']['TableArn']
}
|
ee6648048b1cabdbc04e6c3507cc4e1992f059b2
| 18,976 |
def replace_characters(request):
"""Function to process execute replace_characters function."""
keys = ['text', 'characters', 'replacement']
values = get_data(request, keys)
if not values[0]:
abort(400, 'missing text parameter')
if not values[2]:
values[2] = ''
return _call('replace_characters', keys, values)
|
4765d7c3ace0a0069cac34adb04381efc7043355
| 18,977 |
def serialize_to_jsonable(obj):
"""
Serialize any object to a JSONable form
"""
return repr(obj)
|
c8632b8b475d49b56d47b29afa8b44676b7882a5
| 18,978 |
import collections.abc
import copy
def compare(optimizers, problems, runs=20, all_kwargs={}):
"""Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
"""
    if not (isinstance(optimizers, collections.abc.Iterable)
            or isinstance(problems, collections.abc.Iterable)):
        raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
    if not isinstance(optimizers, collections.abc.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
    if not isinstance(problems, collections.abc.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
    elif not isinstance(all_kwargs, collections.abc.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
# For nice human readable dictionaries, extract useful names from
# optimizer
class_name = optimizer.__class__.__name__
fitness_func_name = problem._fitness_function.__name__
key_name = '{} {}'.format(class_name, fitness_func_name)
# Keep track of how many optimizers of each class / fitness func
# for better keys in stats dict
try:
key_counts[key_name] += 1
except KeyError:
key_counts[key_name] = 1
# Foo 1, Foo 2, Bar 1, etc.
key = '{} {}'.format(key_name, key_counts[key_name])
        print(key + ': ', end='')
# Finally, get the actual stats
stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
        print()
return stats
|
c0f2ed52fe5de8b32e54ca6e3dc05be18145b1e8
| 18,979 |
def az_el2norm(az: float, el: float):
"""Return solar angle as normalized vector."""
theta = np.pi/2-el*np.pi/180
phi = az*np.pi/180
norm = np.asarray(
[
np.sin(theta)*np.cos(phi),
np.sin(theta)*np.sin(phi),
np.cos(theta)
])
return norm
|
5d6e53b778846281a6d2944a52acc64c4386ade4
| 18,980 |
async def api_get_user(user_id: int, db: Session = Depends(get_db)):
"""
Gets user entity
- **user_id**: the user id
- **db**: current database session object
"""
try:
user = await User.get_by_id(id=user_id, db=db)
return user
except UserNotFoundException as e:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=e.detail)
|
2d89fed33e4fbc81e3461b4791f93772a4814ffe
| 18,981 |
import re
def remove_comments(json_like):
"""
Removes C-style comments from *json_like* and returns the result. Example::
    >>> remove_comments('{"foo": "bar", /* block comment */ "baz": "blah"}')
    '{"foo": "bar",  "baz": "blah"}'
    >>> remove_comments('{"url": "http://x"} // trailing comment')
    '{"url": "http://x"} '
From: https://gist.github.com/liftoff/ee7b81659673eca23cd9fc0d8b8e68b7
"""
comments_re = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE,
)
def replacer(match):
s = match.group(0)
if s[0] == "/":
return ""
return s
return comments_re.sub(replacer, json_like)
|
32ddf8dd19a8d1029b0d5f221aed05c3883d8ee5
| 18,982 |
def install_editable(projectroot, **kwargs):
"""Install the given project as an "editable" install."""
return run_pip('install', '-e', projectroot, **kwargs)
|
70b4f5dc1da26beaae31bb1aeaed4457a0d055a3
| 18,983 |
import time
def retry_get(tap_stream_id, url, config, params=None):
"""Wrap certain streams in a retry wrapper for frequent 500s"""
retries = 20
delay = 120
backoff = 1.5
attempt = 1
while retries >= attempt:
r = authed_get(tap_stream_id, url, config, params)
if r.status_code != 200:
logger.info(f'Got a status code of {r.status_code}, attempt '
f'{attempt} of {retries}. Backing off for {delay} '
f'seconds')
time.sleep(delay)
delay *= backoff
attempt += 1
else:
return r
logger.error(f'Status code of latest attempt: {r.status_code}')
logger.error(f'Latest attempt response {r.content}')
raise ValueError(f'Failed {retries} times trying to hit endpoint {url}')
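# Worked example of the backoff schedule above: with delay=120 and backoff=1.5
# the waits between failed attempts are 120, 180, 270, 405, ... seconds, so 20
# retries can take well over an hour before the final ValueError is raised.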
|
af9a0dd8d5c7022467562b7c339f8340b6d87d73
| 18,984 |
from typing import Mapping
from datetime import datetime
from typing import Tuple
def build_trie_from_to(template_dictionary: Mapping, from_timestamp: datetime.datetime, to_timestamp: datetime.datetime) -> Tuple[ahocorasick.Automaton, Mapping]:
"""Function which builds the trie from the first timestamp tot the last one given"""
trie = ahocorasick.Automaton()
words_mapping = dict() # words mapping
word_templates = dict() # words template
# collect the words and the template associated (a list of them if multiple template is associated)
for template in template_dictionary:
# index first template to consider
index_first_timestamp = find_previous_timestamp(template_dictionary[template], from_timestamp) or 0
# for all the revisions of that template starting from the first date possible
for index in range(index_first_timestamp, len(template_dictionary[template])):
words_list, t_stamp = template_dictionary[template][index]
# stop the iteration because we overcome the to_timestamp limit
if t_stamp > to_timestamp:
break
if not template in words_mapping:
words_mapping[template] = list()
words_mapping[template].append(template_dictionary[template][index]) # word lists for that template
for word in words_list:
if not word in word_templates:
word_templates[word] = list()
word_templates[word].append(template)
for word in word_templates:
trie.add_word(word, (word_templates[word], word)) # key is the word to search, value is the template
trie.make_automaton()
if not word_templates:
return None, None
return trie, words_mapping
|
ddd655235a78834260ce1a46c19d0e251f62aede
| 18,985 |
def check_monotonicity_at_split(
tree_df, tree_no, trend, variable, node, child_nodes_left, child_nodes_right
):
"""Function to check monotonic trend is in place at a given split in a single tree."""
if not isinstance(tree_df, pd.DataFrame):
raise TypeError("tree_df should be a pd.DataFrame")
if not isinstance(tree_no, int):
raise TypeError("tree_no should be an int")
if not isinstance(trend, int):
raise TypeError("trend should be an int")
if not isinstance(node, int):
raise TypeError("node should be an int")
if not isinstance(child_nodes_left, list):
raise TypeError("child_nodes_left should be an list")
if not isinstance(child_nodes_right, list):
raise TypeError("child_nodes_right should be an list")
all_child_nodes = child_nodes_left + child_nodes_right
tree_nodes = tree_df["nodeid"].tolist()
child_nodes_not_in_tree = list(set(all_child_nodes) - set(tree_nodes))
if len(child_nodes_not_in_tree) > 0:
raise ValueError(
"the following child nodes do not appear in tree; "
+ str(child_nodes_not_in_tree)
)
left_nodes_max_pred = tree_df.loc[
tree_df["nodeid"].isin(child_nodes_left), "weight"
].max()
right_nodes_min_pred = tree_df.loc[
tree_df["nodeid"].isin(child_nodes_right), "weight"
].min()
if trend == 1:
if left_nodes_max_pred <= right_nodes_min_pred:
monotonic = True
else:
monotonic = False
elif trend == -1:
if left_nodes_max_pred >= right_nodes_min_pred:
monotonic = True
else:
monotonic = False
else:
raise ValueError(
"unexpected value for trend; "
+ str(trend)
+ " variable; "
+ str(variable)
+ " node:"
+ str(node)
)
results = {
"variable": variable,
"tree": tree_no,
"nodeid": node,
"monotonic_trend": trend,
"monotonic": monotonic,
"child_nodes_left_max_prediction": left_nodes_max_pred,
"child_nodes_right_min_prediction": right_nodes_min_pred,
"child_nodes_left": str(child_nodes_left),
"child_nodes_right": str(child_nodes_right),
}
results_df = pd.DataFrame(results, index=[node])
return results_df
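# Hedged usage sketch: tree_df is assumed to be an xgboost-style per-node dump
# with "nodeid" and "weight" (leaf prediction) columns, e.g.
#
#   check_monotonicity_at_split(tree_df, tree_no=0, trend=1, variable="age",
#                               node=0, child_nodes_left=[1], child_nodes_right=[2])
#
# returns a one-row DataFrame flagging whether max(left leaves) <= min(right leaves).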
|
97ed2422c6f85112e364e9c63ff0a54d18b6377d
| 18,986 |
def allocate_usda_ers_mlu_land_in_urban_areas(df, attr, fbs_list):
"""
This function is used to allocate the USDA_ERS_MLU activity 'land in
urban areas' to NAICS 2012 sectors. Allocation is dependent on
assumptions defined in 'literature_values.py' as well as results from
allocating 'EIA_CBECS_Land' and 'EIA_MECS_Land' to land based sectors.
Methodology is based on the manuscript:
Lin Zeng and Anu Ramaswami
Impact of Locational Choices and Consumer Behaviors on Personal
Land Footprints: An Exploration Across the Urban–Rural Continuum in the
United States
Environmental Science & Technology 2020 54 (6), 3091-3102
DOI: 10.1021/acs.est.9b06024
    :param df: df, USDA ERS MLU Land
:param attr: dictionary, attribute data from method yaml for activity set
:param fbs_list: list, FBS dfs for activities created prior
to the activity set that calls on this fxn
    :return: df, allocated USDA ERS MLU Land, FBS format
"""
# define sector column to base calculations
sector_col = 'SectorConsumedBy'
vLogDetailed.info('Assuming total land use from MECS and CBECS included '
'in urban land area, so subtracting out calculated '
'MECS and CBECS land from MLU urban land area')
# read in the cbecs and mecs df from df_list
for df_i in fbs_list:
if (df_i['MetaSources'] == 'EIA_CBECS_Land').all():
cbecs = df_i
elif (df_i['MetaSources'] == 'EIA_MECS_Land').all():
mecs = df_i
# load the federal highway administration fees dictionary
fha_dict = get_transportation_sectors_based_on_FHA_fees()
df_fha = pd.DataFrame.from_dict(
fha_dict, orient='index').rename(
columns={'NAICS_2012_Code': sector_col})
# calculate total residential area from the American Housing Survey
residential_land_area = get_area_of_urban_land_occupied_by_houses_2013()
df_residential = df[df[sector_col] == 'F01000']
df_residential = df_residential.assign(FlowAmount=residential_land_area)
# make an assumption about the percent of urban area that is open space
openspace_multiplier = get_open_space_fraction_of_urban_area()
df_openspace = df[df[sector_col] == '712190']
df_openspace = df_openspace.assign(
FlowAmount=df_openspace['FlowAmount'] * openspace_multiplier)
# sum all uses of urban area that are NOT transportation
# first concat dfs for residential, openspace, commercial,
# and manufacturing land use
df_non_urban_transport_area = pd.concat(
[df_residential, df_openspace, cbecs, mecs], sort=False,
ignore_index=True)
df_non_urban_transport_area = \
df_non_urban_transport_area[['Location', 'Unit', 'FlowAmount']]
non_urban_transport_area_sum = df_non_urban_transport_area.groupby(
['Location', 'Unit'], as_index=False).agg(
{'FlowAmount': sum}).rename(columns={'FlowAmount': 'NonTransport'})
# compare units
compare_df_units(df, df_non_urban_transport_area)
# calculate total urban transportation by subtracting
# calculated areas from total urban land
df_transport = df.merge(non_urban_transport_area_sum, how='left')
df_transport = df_transport.assign(
FlowAmount=df_transport['FlowAmount'] - df_transport['NonTransport'])
df_transport.drop(columns=['NonTransport'], inplace=True)
# make an assumption about the percent of urban transport
# area used by airports
airport_multiplier = get_urban_land_use_for_airports()
df_airport = df_transport[df_transport[sector_col] == '488119']
df_airport = df_airport.assign(
FlowAmount=df_airport['FlowAmount'] * airport_multiplier)
# make an assumption about the percent of urban transport
# area used by railroads
railroad_multiplier = get_urban_land_use_for_railroads()
df_railroad = df_transport[df_transport[sector_col] == '482112']
df_railroad = df_railroad.assign(
FlowAmount=df_railroad['FlowAmount'] * railroad_multiplier)
# further allocate the remaining urban transportation area using
# Federal Highway Administration fees
# first subtract area for airports and railroads
air_rail_area = pd.concat([df_airport, df_railroad], sort=False)
air_rail_area = air_rail_area[['Location', 'Unit', 'FlowAmount']]
air_rail_area_sum = air_rail_area.groupby(
['Location', 'Unit'], as_index=False).agg(
{'FlowAmount': sum}).rename(columns={'FlowAmount': 'AirRail'})
df_highway = df_transport.merge(air_rail_area_sum, how='left')
df_highway = df_highway.assign(
FlowAmount=df_highway['FlowAmount'] - df_highway['AirRail'])
df_highway.drop(columns=['AirRail'], inplace=True)
# add fed highway administration fees
df_highway2 = df_highway.merge(df_fha, how='left')
df_highway2 = df_highway2[df_highway2['ShareOfFees'].notna()]
df_highway2 = df_highway2.assign(
FlowAmount=df_highway2['FlowAmount'] * df_highway2['ShareOfFees'])
df_highway2.drop(columns=['ShareOfFees'], inplace=True)
# concat all df subsets
allocated_urban_areas_df = pd.concat(
[df_residential, df_openspace, df_airport, df_railroad, df_highway2],
ignore_index=True, sort=False).reset_index(drop=True)
# aggregate because multiple rows to household data due to residential
# land area and highway fee shares
groupcols = list(df.select_dtypes(include=['object', 'int']).columns)
allocated_urban_areas_df_2 = aggregator(allocated_urban_areas_df,
groupcols)
return allocated_urban_areas_df_2
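# Self-contained sketch (invented locations, sectors and acreages) of the
# merge-and-subtract pattern used above to strip non-transport land out of the
# total urban land before the transport split; it is illustrative only and does
# not reproduce the flowsa data or helper functions.
import pandas as pd
toy_urban_total = pd.DataFrame({
    "Location": ["00000", "00000"],
    "SectorConsumedBy": ["F01000", "488119"],
    "Unit": ["Thousand Acres", "Thousand Acres"],
    "FlowAmount": [100.0, 100.0],
})
toy_non_transport = pd.DataFrame({
    "Location": ["00000"],
    "Unit": ["Thousand Acres"],
    "NonTransport": [60.0],
})
toy_transport = toy_urban_total.merge(toy_non_transport, how="left")
toy_transport = toy_transport.assign(
    FlowAmount=toy_transport["FlowAmount"] - toy_transport["NonTransport"]
).drop(columns=["NonTransport"])
# each row now carries the 40.0 units of residual urban transport area that the
# airport, railroad and FHA-fee shares are applied to.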
|
5fcb797b48b912595d722cde3ac0d499526ea899
| 18,987 |
def get_distances_between_points(ray_points3d, last_bin_width=1e10):
"""Estimates the distance between points in a ray.
Args:
ray_points3d: A tensor of shape `[A1, ..., An, M, 3]`,
where M is the number of points in a ray.
    last_bin_width: A scalar indicating the width of the last bin.
Returns:
A tensor of shape `[A1, ..., An, M]` containing the distances between
the M points, with the distance of the last element set to a high value.
"""
shape.check_static(
tensor=ray_points3d,
tensor_name="ray_points3d",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=ray_points3d,
tensor_name="ray_points3d",
has_rank_greater_than=1)
dists = tf.norm(ray_points3d[..., 1:, :] - ray_points3d[..., :-1, :], axis=-1)
if last_bin_width > 0.0:
dists = tf.concat([dists, tf.broadcast_to([last_bin_width],
dists[..., :1].shape)], axis=-1)
return dists
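# Hedged usage sketch (not from the original source): two rays with eight
# sampled points each; assumes TensorFlow and the tensorflow_graphics `shape`
# helper used above are importable in this environment.
import tensorflow as tf
example_rays = tf.random.uniform((2, 8, 3))
example_dists = get_distances_between_points(example_rays)
# example_dists has shape (2, 8); the last entry along each ray is the large
# sentinel value given by last_bin_width (1e10 by default).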
|
34752bd64bcf4582945006c4bd3489397aa7350d
| 18,988 |
def reclassification_heavy_duty_trucks_to_light_commercial_vehicles(register_df: pd.DataFrame) -> pd.DataFrame:
"""
Replace Category to Light Commercial Vehicles for Heavy Duty Trucks of weight below 3500kg
Es Tracta de vehicles registrats TIPUS CAMIONS i per tant classificats en la categoria Heavy Duty Trucks quan no hi
pertoquen degut a pes inferior a 3500kg.
"""
anti = register_df[(register_df['TIPUS'] == 'CAMIONS') &
(register_df['PES_BUIT'] < 3500) &
(register_df['Category'] == 'Heavy Duty Trucks')]
info_logger.info(f'Total number of Heavy Duty Trucks converted to Light Commercial Vehicles loaded: {anti.shape[0]}')
result = anti_join_all_cols(register_df, anti)
recategorized_rows = anti.assign(Category='Light Commercial Vehicles')
return result.append(recategorized_rows)
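# Hedged usage sketch (invented rows): a register with one misclassified
# 'CAMIONS' entry under 3500 kg that gets moved to 'Light Commercial Vehicles'.
# Assumes the module-level helpers used above (anti_join_all_cols, info_logger)
# are available, and a pandas version that still provides DataFrame.append.
import pandas as pd
example_register = pd.DataFrame({
    "TIPUS": ["CAMIONS", "CAMIONS"],
    "PES_BUIT": [2800, 12000],
    "Category": ["Heavy Duty Trucks", "Heavy Duty Trucks"],
})
example_fixed = reclassification_heavy_duty_trucks_to_light_commercial_vehicles(example_register)
# example_fixed keeps the 12000 kg truck as 'Heavy Duty Trucks' and reassigns
# the 2800 kg one to 'Light Commercial Vehicles'.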
|
638e3281c323c1dac4ddcde5e58e2eeac4131c04
| 18,989 |
def SpComp(rho, U, mesh, fea, penal):
"""Alias SpCompFunction class with the apply method"""
return SpCompFunction.apply(rho, U, mesh, fea, penal)
|
4c0997fa5213e291376feb415f74e48e273ea071
| 18,990 |
import json
def remove_event_class(request):
"""
Remove the given event class, and update the
order of the rest classes
"""
if request.user.is_authenticated:
class_id = request.POST.get("classId", None)
event_class = EventClass.get_classes_by_user(request.user.id).get(id=class_id)
event_class.delete()
return HttpResponse(json.dumps({}), content_type="application/json")
return render_to_response(
"todo_login.html",
{"error_info": constants.SESSION_EXPIRED_MSG,},
RequestContext(request),
)
|
aa4ac93cb8568e5bdc26c4d5913c9438d418b27e
| 18,991 |
def get_landmark_position_from_state(x, ind):
"""
Extract landmark position from state vector
"""
lm = x[STATE_SIZE + LM_SIZE * ind: STATE_SIZE + LM_SIZE * (ind + 1), :]
return lm
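# Hedged usage sketch: STATE_SIZE and LM_SIZE are module-level constants in the
# original code; the values below (3 for the robot pose [x, y, yaw] and 2 for a
# landmark [x, y]) are the usual EKF-SLAM choices, assumed here for
# illustration only.
import numpy as np
STATE_SIZE = 3
LM_SIZE = 2
example_x = np.arange(7, dtype=float).reshape(-1, 1)  # pose plus two landmarks
first_landmark = get_landmark_position_from_state(example_x, 0)
# first_landmark is the 2x1 column vector [[3.], [4.]]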
|
048d31bbd6810663d51e9db34b5c87ebec9b6f27
| 18,992 |
import cv2
import numpy as np
import numpy.linalg as npl
import numpy.ma as ma
import scipy.ndimage.measurements as mes
def get_rectangle(roi):
    """
    Get the rectangle that has changing colors in the roi.
    Returns a boolean success value and the BGR pixel values of the detected
    rectangle region (or None on failure).
    """
gaussian = cv2.GaussianBlur(roi, (9, 9), 10.0)
roi = cv2.addWeighted(roi, 1.5, gaussian, -0.5, 0, roi)
nh, nw, r = roi.shape
# cluster
Z = roi.reshape((-1, 3))
Z = np.float32(Z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 7
    ret, label, centers = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
centers = np.uint8(centers)
image_as_centers = centers[label.flatten()]
image_as_centers = image_as_centers.reshape((roi.shape))
labels = label.reshape((roi.shape[:2]))
possible_clusters = list(np.arange(K))
    whiteness = list(map(lambda x: npl.norm(x - np.array([255, 255, 255])), centers))
whitest = np.argmin(whiteness)
possible_clusters.remove(whitest)
energys = []
correct_masks = []
for num, p in enumerate(possible_clusters):
mask_clusters = ma.masked_equal(labels, p)
draw_mask = mask_clusters.mask.astype(np.uint8)
draw_mask *= 255
labeled_array, num_features = mes.label(draw_mask)
count = np.bincount(labeled_array.flatten())
count = count[1:]
val = np.argmax(count)
mask_obj = ma.masked_equal(labeled_array, val + 1)
draw_mask = mask_obj.mask.astype(np.uint8)
draw_mask *= 255
# cv2.imshow(str(num), draw_mask)
# cv2.waitKey(0)
top = np.count_nonzero(draw_mask)
valz = np.fliplr(np.transpose(draw_mask.nonzero()))
rect = cv2.minAreaRect(valz)
        box = cv2.boxPoints(rect)
box = np.int0(box)
rect_mask = np.zeros((nh, nw))
cv2.drawContours(rect_mask, [box], 0, 255, -1)
bottom = np.count_nonzero(rect_mask)
l, w, vcost = _get_lw(box)
        if w < .001:
            print('WIDTH TOO SMALL')
            continue
valz = np.fliplr(np.transpose(draw_mask.nonzero()))
area = cv2.contourArea(box)
area /= (nh * nw)
        if vcost > .5:
            print("VCOST TOO HIGH")
            continue
        if area < .03:
            print(area)
            print("TOOOO SMALL")
            continue
        if top / bottom < .7:
            print("TOO SPARSE", top / bottom)
            continue
        energy = area + 1.5 * top / bottom - abs(2.5 - l / w) - .2 * vcost
        if energy < 0:
            print("LOW ENERGY!")
            continue
        print(num, "area: ", area, "filled:", top, "total:", bottom, 'rat', top / bottom,
              "l/w", abs(2.5 - l / w), "vcost", vcost, "energy", energy)
energys.append(energy)
correct_masks.append(mask_obj)
if len(energys) == 0:
print "EVERY ENERGY WRONG"
return False, None
correct_masks = [x for y, x in sorted(zip(energys, correct_masks), reverse=True)]
energys = sorted(energys, reverse=True)
if len(energys) > 1 and abs(energys[0] - energys[1]) < .2:
print "TOO CLOSE TO CALLS"
return False, None
correct_mask = correct_masks[0]
colors = roi[correct_mask.mask]
draw_mask = correct_mask.mask.astype(np.uint8)
draw_mask *= 255
return True, colors
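# Hedged usage sketch: the helper _get_lw used above is assumed to be defined
# elsewhere in the original module, so this only shows the intended call on a
# BGR region of interest cropped from a camera frame; the file name and crop
# coordinates are placeholders.
import cv2
example_frame = cv2.imread("frame.png")
example_roi = example_frame[100:300, 200:500]
found, rect_colors = get_rectangle(example_roi)
if found:
    print("mean rectangle colour (BGR):", rect_colors.mean(axis=0))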
|
da3e6311f6a598cf57cffd2c0bb06f0ca53fa108
| 18,993 |
async def challenge_process_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge in process
"""
return await populate_challenge()
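# Hedged usage sketch: how a test would typically consume this coroutine once
# it is registered as an async fixture (the fixture decorator sits outside this
# snippet); the test name and assertion below are illustrative only.
import pytest
@pytest.mark.asyncio
async def test_challenge_is_in_process(challenge_process_fixture):
    assert challenge_process_fixture is not None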
|
cf553c0697c824df4273838c7d87ffe6e8ab6ae7
| 18,994 |
import pkgutil
def _read_pdg_masswidth(filename):
"""Read the PDG mass and width table and return a dictionary.
Parameters
----------
    filename : string
Path to the PDG data file, e.g. 'data/pdg/mass_width_2015.mcd'
Returns
-------
particles : dict
A dictionary where the keys are the particle names with the charge
appended in case of a multiplet with different masses, e.g. 't'
for the top quark, 'K+' and 'K0' for kaons.
The value of the dictionary is again a dictionary with the following
keys:
- 'id': PDG particle ID
        - 'mass': list with the mass, positive and negative error in GeV
        - 'width': list with the width, positive and negative error in GeV
- 'name': same as the key
"""
data = pkgutil.get_data('flavio.physics', filename)
lines = data.decode('utf-8').splitlines()
particles_by_name = {}
for line in lines:
if line.strip()[0] == '*':
continue
mass = ((line[33:51]), (line[52:60]), (line[61:69]))
if mass[0].replace(' ', '') == '':
# if mass is empty, go to next line
            # (necessary for 2019 neutrino entries)
continue
mass = [float(m) for m in mass]
width = ((line[70:88]), (line[89:97]), (line[98:106]))
if width[0].strip() == '':
width = (0,0,0)
else:
width = [float(w) for w in width]
ids = line[0:32].split()
charges = line[107:128].split()[1].split(',')
if len(ids) != len(charges):
raise ValueError()
for i, id_ in enumerate(ids):
particle = {}
particle_charge = charges[i].strip()
particle[particle_charge] = {}
particle[particle_charge]['id'] = id_.strip()
particle[particle_charge]['mass'] = mass
particle[particle_charge]['charge'] = particle_charge
particle[particle_charge]['width'] = width
particle_name = line[107:128].split()[0]
particle[particle_charge]['name'] = particle_name
if particle_name in particles_by_name.keys():
particles_by_name[particle_name].update(particle)
else:
particles_by_name[particle_name] = particle
result = { k + kk: vv for k, v in particles_by_name.items() for kk, vv in v.items() if len(v) > 1}
result.update({ k: list(v.values())[0] for k, v in particles_by_name.items() if len(v) == 1})
return result
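# Hedged usage sketch: reads the PDG table bundled with flavio (the relative
# path comes from the docstring above; the exact file name may differ between
# flavio versions) and looks up the top-quark entry.
example_particles = _read_pdg_masswidth('data/pdg/mass_width_2015.mcd')
print(example_particles['t']['mass'])  # [central value, +error, -error] in GeV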
|
c61202aeb4ad36e22786457306de1ac5773b56d2
| 18,995 |
import torch
def fps_and_pred(model, batch, **kwargs):
"""
    Get fingerprints and predictions from the model.
Args:
model (nff.nn.models): original NFF model loaded
batch (dict): batch of data
Returns:
results (dict): model predictions and its predicted
fingerprints, conformer weights, etc.
"""
model.eval()
# make the fingerprints
outputs, xyz = model.make_embeddings(batch, xyz=None, **kwargs)
# pool to get the learned weights and pooled fingerprints
pooled_fp, learned_weights = model.pool(outputs)
# get the final results
results = model.readout(pooled_fp)
# add sigmoid if it's a classifier and not in training mode
if model.classifier:
keys = list(model.readout.readout.keys())
for key in keys:
results[key] = torch.sigmoid(results[key])
# add any required gradients
results = model.add_grad(batch=batch, results=results, xyz=xyz)
# put into a dictionary
conf_fps = [i.cpu().detach() for i in outputs["conf_fps_by_smiles"]]
energy = batch.get("energy")
boltz_weights = batch.get("weights")
# with operations to de-batch
n_confs = [(n // m).item()
for n, m in zip(batch['num_atoms'], batch['mol_size'])]
for key, val in results.items():
results[key] = [i for i in val]
results.update({"fp": [i for i in pooled_fp],
"conf_fps": conf_fps,
"learned_weights": learned_weights,
"boltz_weights": (list(torch.split
(boltz_weights, n_confs)))})
if energy is not None:
results.update({"energy": list(torch.split(energy, n_confs))})
return results
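# Hedged usage sketch, kept as comments because building an NFF model and a
# batched dataset is outside this snippet; `load_model` and `example_loader`
# are hypothetical placeholders for whatever loader the surrounding project
# provides, not real NFF API names.
# model = load_model("/path/to/trained/nff/model")
# example_batch = next(iter(example_loader))
# results = fps_and_pred(model, example_batch)
# results["fp"] then holds one pooled fingerprint tensor per species, and
# results["conf_fps"] the per-conformer fingerprints.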
|
1a8cca3ffe0d386e506ab42f6e77e00b1a5975d1
| 18,996 |
import re
def preprocess_text(text):
"""
    Lower-case and tokenise the text: expand contractions, strip quote/comma/apostrophe
    characters, separate '.' and '#' markers, and return the resulting list of words.
"""
text = contract_words(text)
text = text.lower()
# text = text.replace('"', "").replace(",", "").replace("'", "")
text = text.replace('"', "").replace(",", "").replace("'", "").replace(".", " .") ## added by PAVAN
## To capture multiple # feature -- added by PAVAN
if re.search(r'[a-z]+\#', text):
tmp_ls = text.split()
text = ' '.join(
[re.sub(pattern=r'\#', repl=' #', string=str(i)) if re.search(r'[a-z]+\#', str(i)) else i for i in tmp_ls])
## To capture # feature -- added by PAVAN
if re.search(r'\#[a-z]+', text):
tmp_ls = text.split()
text = ' '.join(
[re.sub(pattern=r'\#', repl='hashtagfea ', string=str(i)) if re.search(r'\#[a-z]+', str(i)) else i for i in
tmp_ls])
return text.split()
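# Hedged usage sketch: contract_words is defined elsewhere in the original
# module, so this only illustrates the expected call and the hashtag handling
# added in the comments above.
example_tokens = preprocess_text('Great game #WorldCup, "unbelievable" finish.')
# assuming contract_words leaves this sentence unchanged, the result is
# ['great', 'game', 'hashtagfea', 'worldcup', 'unbelievable', 'finish', '.']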
|
0b42b57629e68c2f0bf3831dc226a2ba823cfdb3
| 18,997 |
import unicodedata
def unicodeToAscii(s):
"""unicodeToAscii
Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
For example, 'Ślusàrski' -> 'Slusarski'
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
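# Hedged usage sketch: all_letters is a module-level string of allowed
# characters in the original code (the classic char-RNN tutorial this mirrors
# uses string.ascii_letters + " .,;'"); it is assumed here for illustration.
import string
all_letters = string.ascii_letters + " .,;'"
print(unicodeToAscii('Ślusàrski'))  # -> 'Slusarski'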
|
7791ae244499b2448ac4bc5de0fde35057d1f5d5
| 18,998 |
def update_ip_table(nclicks, value):
"""
Function that updates the IP table in the Elasticsearch Database that
contains the frequency as well as the IP address of the machine querying
that particular domain.
Args:
nclicks: Contains the number of clicks registered by the submit button.
value: Contains the domain name corresponding to which the IP table has
to be returned.
Returns:
The IP address data regarding the number of times a particular domain
was queried by a particular machine.
"""
if value is None or value == '':
return []
else:
try:
            count = es.get(index=value, id=1)['_source']['count']
            # keys of 'count' are the IP addresses of the querying machines,
            # sorted here by query frequency in descending order
            ip_addresses = [key for (key, freq) in
                            sorted(count.items(), key=lambda x: x[1], reverse=True)]
            data = [{'sl_no': j + 1, 'ip': i, 'count': count[i]}
                    for i, j in zip(ip_addresses, range(len(count)))]
        except Exception:
            data = []
return data
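# Hedged usage sketch, kept as comments because the Dash app object and the
# Elasticsearch client `es` are created elsewhere; the component ids below
# ('ip-table', 'submit-button', 'domain-input') are placeholders, not the
# original layout's ids.
# from dash.dependencies import Input, Output, State
# app.callback(
#     Output('ip-table', 'data'),
#     [Input('submit-button', 'n_clicks')],
#     [State('domain-input', 'value')],
# )(update_ip_table)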
|
ef0df6f1fb79d86a707990d0790dadabecbd22c1
| 18,999 |