content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
import cv2


def annotation_multi_vertical_height(_img, _x, _y_list, _line_color, _text_color, _text_list,
                                     _thickness=1,
                                     _with_arrow=True):
"""
纵向标注多个高度
:param _img: 需要标注的图像
:param _x: 当前直线所在宽度
:param _y_list: 所有y的列表
:param _line_color: 线条颜色(bgr)
:param _text_color: 文本颜色(bgr)
:param _text_list: 所有需要显示的文本
:param _thickness: 线条粗细
:param _with_arrow: 线条两端是否带箭头
:return: 标注后的图像
"""
assert len(_y_list) - 1 == len(_text_list), '线段数与字符串数不匹配'
to_return_img = _img.copy()
# 需要绘制:
# 1. 双向箭头线
# 2. 箭头到头的直线
# 3. 线条对应的文字
for m_start_y, m_end_y, m_text in zip(_y_list[:-1], _y_list[1:], _text_list):
if _with_arrow:
cv2.arrowedLine(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
cv2.arrowedLine(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
        else:
            cv2.line(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
text_start_x = _x + 10
text_start_y = m_start_y + (m_end_y - m_start_y) // 2
to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y), _text_color, m_text)
for m_y in _y_list:
cv2.line(to_return_img, (_x - 12, m_y), (_x + 12, m_y), _line_color, thickness=_thickness)
return to_return_img | 2e181eddee2dea969b14dc18f910d4c5f82fb371 | 9,300 |
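# A minimal usage sketch for annotation_multi_vertical_height (illustrative only):
# the image, coordinates, colors and labels below are made-up values, and the
# private __annotation_text_on_image helper must already exist in the module.
import cv2
import numpy as np

canvas = np.full((400, 300, 3), 255, dtype=np.uint8)  # blank white image
annotated = annotation_multi_vertical_height(
    canvas, _x=150, _y_list=[50, 200, 350],           # two stacked segments
    _line_color=(255, 0, 0), _text_color=(0, 0, 255),
    _text_list=['part A', 'part B'])
cv2.imwrite('annotated.png', annotated)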
from msrestazure.azure_exceptions import CloudError


async def list_(hub, ctx, registry_name, resource_group, **kwargs):
"""
.. versionadded:: 3.0.0
Lists all the replications for the specified container registry.
:param registry_name: The name of the container registry.
:param resource_group: The name of the resource group to which the container registry belongs.
CLI Example:
.. code-block:: bash
azurerm.containerregistry.replication.list testrepo testgroup
"""
result = {}
regconn = await hub.exec.azurerm.utils.get_client(
ctx, "containerregistry", **kwargs
)
try:
repls = await hub.exec.azurerm.utils.paged_object_to_list(
regconn.replications.list(
registry_name=registry_name, resource_group_name=resource_group
)
)
for repl in repls:
result[repl["name"]] = repl
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error(
"containerregistry", str(exc), **kwargs
)
result = {"error": str(exc)}
return result | aa24ab14278e49da35fe6851d71e6d375f763b4d | 9,301 |
import logging
import time
from functools import wraps

import numpy as np
from numba import jit

logger = logging.getLogger(__name__)


def timeit(func):
    """Minimal stand-in for the project's timing decorator: logs elapsed wall time."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        logger.debug("%s took %.3f s", getattr(func, "__name__", repr(func)),
                     time.perf_counter() - start)
        return result
    return wrapper
def dbrg(images, T, r):
"""
Segmentation by density-based region growing (DBRG).
Parameters
----------
n : int
Number of blurred images.
M : np.ndarray
The mask image.
r : int
Density connectivity search radius.
"""
n = len(images)
M = _generate_init_mask(images, T)
D = _density_distribution(n, M, r)
S = _generate_seeds(D)
# make sure there is at least one seed
assert S.any()
# unlabeled
R = np.full(M.shape, 0, dtype=np.uint32)
    V = np.full(M.shape, -np.inf, dtype=np.float32)  # np.NINF was removed in NumPy 2.0
# label by density map
for i, d in enumerate(D):
logger.debug("density {}".format(i))
R[(d > V) & S] = i + 1
V[(d > V) & S] = d[(d > V) & S]
# label by density connectivity
v = np.empty(len(D) + 1, dtype=np.uint32) # buffer
@timeit
@jit(nopython=True)
def ps_func(M, R, v):
n, m = M.shape
        ps = []  # coordinates of pixels that are still unlabeled
for y in range(0, n):
for x in range(0, m):
if R[y, x] > 0:
continue
pu = min(y + r, n - 1)
pd = max(y - r, 0)
pr = min(x + r, m - 1)
pl = max(x - r, 0)
v.fill(0)
for yy in range(pd, pu + 1):
for xx in range(pl, pr + 1):
if (xx - x) * (xx - x) + (yy - y) * (yy - y) <= r * r:
v[R[yy, xx]] += 1
R[y, x] = v.argmax()
if R[y, x] == 0:
ps.append((y, x))
return ps
ps = ps_func(M, R, v)
# label by nearest neighbor
@timeit
@jit(nopython=True)
def psv_func(ps, M, R):
n, m = M.shape
# psv = [] # filled result
for y, x in ps:
r = 1
while True:
pu = min(y + r, n - 1)
pd = max(y - r, 0)
pr = min(x + r, m - 1)
pl = max(x - r, 0)
v = []
for yy in range(pd, pu + 1):
for xx in range(pl, pr + 1):
if R[yy, xx] > 0:
v.append(
(R[yy, xx], (xx - x) * (xx - x) + (yy - y) * (yy - y))
)
if len(v) == 0:
r += 1
else:
# v.sort(key=lambda p: p[1])
# psv.append(v[0][0])
R_min, _d_min = v[0]
for _R, _d in v[1:]:
if _d < _d_min:
R_min, _d_min = _R, _d
# psv.append(R_min)
R[y, x] = R_min
break
# return psv
return R
# psv = psv_func(ps, M, R)
if ps:
R = psv_func(ps, M, R)
# move into psv
# for (y, x), v in zip(ps, psv):
# R[y, x] = v
# make sure each position is assigned a mask value
assert np.all(R != 0)
return R | 0fb3aa19252be95d436013025e90f2dd9a12da4e | 9,302 |
from typing import Union
def latest_window_partition_selector(
context: ScheduleEvaluationContext, partition_set_def: PartitionSetDefinition[TimeWindow]
) -> Union[SkipReason, Partition[TimeWindow]]:
"""Creates a selector for partitions that are time windows. Selects latest time window that ends
before the schedule tick time.
"""
partitions = partition_set_def.get_partitions(context.scheduled_execution_time)
if len(partitions) == 0:
return SkipReason()
else:
return partitions[-1] | bac6fe78b0111cdf6272c7bf08a0d555971c20a5 | 9,303 |
import os
def env_or_val(env, val, *args, __type=str, **kwargs):
"""Return value of environment variable (if it's defined) or a given fallback value
:param env: Environment variable to look for
:type env: ``str``
:param val: Either the fallback value or function to call to compute it
:type val: ``str`` or a function
:param args: If ``val`` is a function, these are the ``*args`` to pass to that function
:type args: ``list``
:param __type: type of value to return when extracting from env variable, can be one of
``str``, ``int``, ``float``, ``bool``, ``list``
:type __type: ``type``
:param kwargs: If ``val`` is a function, these are the ``**kwargs`` to pass to that function
:type kwargs: ``dict``
:return: Either the env value (if defined) or the fallback value
    :rtype: the type selected by ``__type`` when read from the environment; otherwise the fallback's type
"""
if env not in os.environ:
if isinstance(val, type(env_or_val)):
val = val(*args, **kwargs)
return val
retval = os.environ.get(env)
if __type in [str, int, float]:
return __type(retval)
elif __type is bool:
if retval.lower() in ["true", "1", "yes"]:
return True
else:
return False
elif __type is list:
return retval.split(":")
else:
raise ValueError("__type must be one of: str, int, float, bool, list") | 6a94c627ec4af63f54f5d3b6627141cb0624e445 | 9,304 |
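# A small usage sketch for env_or_val (hypothetical variable names), showing the
# plain fallback, a computed fallback, and typed extraction from the environment.
import os

os.environ["WORKERS"] = "8"
workers = env_or_val("WORKERS", 2, __type=int)         # -> 8, read from the environment
token = env_or_val("API_TOKEN", lambda: "anonymous")   # -> "anonymous", computed fallback
paths = env_or_val("EXTRA_PATHS", [], __type=list)     # -> [] unless EXTRA_PATHS="a:b:c" is set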
def html(i):
"""
Input: {
(skip_cid_predix) - if 'yes', skip "?cid=" prefix when creating URLs
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
d=i.get('dict',{})
scp=i.get('skip_cid_prefix','')
bscp=(scp=="yes")
short=i.get('short','')
llm=d.get('meta',{})
llmisc=llm.get('misc',{})
lldict=llm.get('dict',{})
repo_url1=llmisc.get('repo_url1','')
repo_url2=llmisc.get('repo_url2','')
repo_url3=llmisc.get('repo_url3','')
duoa=llmisc.get('data_uoa','')
duid=llmisc.get('data_uid','')
ruoa=llmisc.get('repo_uoa','')
ruid=llmisc.get('repo_uid','')
muid=llmisc.get('module_uid','')
muoa=llmisc.get('module_uoa','')
#Main
title=llmisc.get('title','')
authors=llmisc.get('authors','')
where=llmisc.get('where','')
paper_pdf_url=llmisc.get('paper_pdf_url','')
paper_doi_url=llmisc.get('paper_doi_url','')
artifact_doi_url=llmisc.get('artifact_doi_url','')
workflow=llmisc.get('workflow','')
workflow_url=llmisc.get('workflow_url','')
h=''
article=''
if title!='':
article='<b>'+title+'</b>'
if authors!='':
h+='<div id="ck_entries_space4"></div>\n'
h+='<i>'+authors+'</i>\n'
baaa=llmisc.get('badge_acm_artifact_available','')
baaf=llmisc.get('badge_acm_artifact_functional','')
baar=llmisc.get('badge_acm_artifact_reusable','')
barr=llmisc.get('badge_acm_results_reproduced','')
barp=llmisc.get('badge_acm_results_replicated','')
badges=''
if baaa!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_available"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_available_dl.jpg" width="64"></a>'
if baaf!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_functional"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_evaluated_functional_dl.jpg" width="64"></a>'
if baar!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_reusable"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_evaluated_reusable_dl.jpg" width="64"></a>'
if barr!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#results_validated"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/results_reproduced_dl.jpg" width="64"></a>'
if barp!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#results_validated"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/results_replicated_dl.jpg" width="64"></a>'
if workflow.lower()=='ck':
x1=''
x2=''
if workflow_url!='':
x1='<a href="'+workflow_url+'">'
x2='</a>'
badges+=' '+x1+'<img src="https://ctuning.org/ae/stamps/ck-workflow.png" width="100">'+x2
if badges!='':
h+='<div id="ck_entries_space4"></div>\n'
h+='<center>'+badges+'</center>\n'
h1=''
if short!='yes':
h+='<div style="background-color:#efefef;margin:5px;padding:5px;">\n'
url0=i.get('url','')
urlc=url0.replace('index.php','c.php') # Needed for components
# x1=''
# x2=''
# if url0!='' and ruid!='':
# prfx=''
# if not bscp: prfx='cid='
# x1='<a href="'+url0+prfx+cfg['module_deps']['component.repo']+':'+ruid+'" target="_blank">'
# x2='</a>'
# h+='<b>Repo name:</b> '+x1+ruoa+x2+'<br>\n'
where_url=llmisc.get('where_url','')
if where!='':
x1=''
x2=''
if where_url!='':
x1='<a href="'+where_url+'">'
x2='</a>'
h+='<b>Where published:</b> '+x1+where+x2+'<br>\n'
if paper_doi_url!='':
x=paper_doi_url
j=paper_doi_url.find('doi.org/')
if j>0: x=paper_doi_url[j+8:]
h+='<b>Article DOI:</b> <a href="'+paper_doi_url+'">'+x+'</a><br>\n'
if paper_pdf_url!='':
h+='<b>Article:</b> <a href="'+paper_pdf_url+'">PDF</a><br>\n'
if artifact_doi_url!='':
x=artifact_doi_url
j=artifact_doi_url.find('doi.org/')
if j>0: x=artifact_doi_url[j+8:]
h+='<b>Artifact DOI:</b> <a href="'+artifact_doi_url+'">'+x+'</a><br>\n'
uaa=llmisc.get('unified_artifact_appendix','')
if uaa!='':
h+='<b>Unified artifact appendix:</b> <a href="'+uaa+'">Link</a><br>\n'
arts=llmisc.get('artifact_sources','')
arts_url=llmisc.get('artifact_sources_url','')
if arts_url!='':
x=arts_url
if arts!='': x=arts
h+='<b>Artifact before standardization:</b> <a href="'+arts_url+'">'+x+'</a><br>\n'
if workflow_url!='':
x=workflow_url
y='Automated workflow'
if workflow!='':
x=workflow
if x=='CK':
x='Link'
y='Standardized CK workflow'
h+='<b>'+y+':</b> <a href="'+workflow_url+'">'+x+'</a>\n'
ck_repo_uid=llmisc.get('ck_repo_uid','')
if ck_repo_uid!='':
prfx=''
if not bscp: prfx='cid='
x=urlc+prfx+cfg['module_deps']['component.repo']+':'+ck_repo_uid
h+=' (<a href="'+x+'">ReproIndex</a>)\n'
h+='<br>\n'
tasks=llmisc.get('tasks',{})
if len(tasks)>0:
h+='<b>Standardized CK pipelines (programs):</b><br>\n'
h+='<div style="margin-left:20px;">\n'
h+=' <ul>\n'
for tuid in tasks:
tt=tasks[tuid]
tuoa=tt.get('data_uoa','')
if tuoa!='':
prfx=''
if not bscp: prfx='cid='
x='<a href="'+urlc+prfx+cfg['module_deps']['component.program']+':'+tuid+'" target="_blank">'+tuoa+'</a>'
h+=' <li><span style="color:#2f0000;">'+x+'</li>\n'
h+=' </ul>\n'
h+='</div>\n'
results=llmisc.get('results','')
results_url=llmisc.get('results_url','')
if results_url!='':
x=results_url
if results!='': x=results
h+='<b>Reproducible results:</b> <a href="'+results_url+'">'+x+'</a><br>\n'
some_results_replicated=llmisc.get('some_results_replicated','')
if some_results_replicated=='yes':
h+='<b>Some results replicated:</b> ✔<br>\n'
rurl=llmisc.get('reproducibility_url','')
if rurl!='':
x='Link'
if 'acm' in rurl.lower() or 'ctuning' in rurl.lower():
x='ACM and cTuning'
h+='<b>Reproducible methodology:</b> <a href="'+rurl+'">'+x+'</a><br>\n'
results_dashboard_url=llmisc.get('results_dashboard_url','')
if results_dashboard_url!='':
x=results_dashboard_url
j=x.find('://')
if j>=0:
x=x[j+3:]
h+='<b>Dashboard with results:</b> <a href="'+results_dashboard_url+'">'+x+'</a><br>\n'
h+='</div>\n'
# Extras
h1=''
if paper_doi_url!='':
h1+='[ <a href="'+paper_doi_url+'" target="_blank">paper</a> ] \n'
# ck_repo_uid=llmisc.get('ck_repo_uid','')
# if ck_repo_uid!='':
# prfx=''
# if not bscp: prfx='cid='
# x=urlc+prfx+cfg['module_deps']['component.repo']+':'+ck_repo_uid
# h1+='[ <a href="'+x+'" target="_blank">CK repository</a> ] \n'
return {'return':0, 'html':h, 'html1':h1, 'article':article} | a2effe3ac9cf9fb8678283cb9d23cf574bc54700 | 9,305 |
def multi_particle_first_np_metafit(n):
"""Fit to plots of two-body matrix elements from various normal-ordering
schemes, where only the first n points are taken from each scheme
"""
    name = 'multi_particle_first_{}p_metafit'.format(n)
def mpfnp(fitfn, exp_list, **kwargs):
return multi_particle_metafit_int(
fitfn, exp_list,
sourcedir=DPATH_FILES_INT, savedir=DPATH_PLOTS,
transform=first_np(n),
super_transform_post=s_combine_like(['interaction']),
code='mpf{}p'.format(n), mf_name=name,
xlabel='A', ylabel='Energy (MeV)', **kwargs
)
mpfnp.__name__ = name
return mpfnp | 384b4d7a1627e554e3ba1583236dbb8fde136b9c | 9,306 |
import json
from pathlib import Path
from typing import Dict, List, Union
def readJSONLFile(file_name: Union[str, Path]) -> List[Dict]:
"""
Read a '.jsonl' file and create a list of dicts
Args:
file_name: `Union[str,Path]`
The file to open
Returns:
The list of dictionaries read from the 'file_name'
"""
lines = (
open(file_name, 'r', encoding='utf-8').readlines() if isinstance(file_name, str) else
file_name.read_text('utf-8').splitlines(False)
)
return [json.loads(line) for line in lines] | 8e33fad766a255578179828dc76ec793c02f90b9 | 9,307 |
def _dtype_from_cogaudioformat(format: CogAudioFormat) -> np.dtype:
"""This method returns the numpy "data type" for a particular audio format."""
if COG_AUDIO_IS_INT(format):
if COG_AUDIO_FORMAT_DEPTH(format) == COG_AUDIO_FORMAT_DEPTH_S24:
return np.dtype(np.uint8)
elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 2:
return np.dtype(np.int16)
elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 4:
return np.dtype(np.int32)
elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 8:
return np.dtype(np.int64)
elif COG_AUDIO_IS_FLOAT(format):
return np.dtype(np.float32)
elif COG_AUDIO_IS_DOUBLE(format):
return np.dtype(np.float64)
raise NotImplementedError("Cog Audio Format not amongst those supported for numpy array interpretation") | d41b01fddd798eaa526e767775138e4a4e3ce718 | 9,308 |
def makeSiteWhitelist(jsonName, siteList):
"""
Provided a template json file name and the site white list from
the command line options; return the correct site white list based
on some silly rules
"""
if 'LHE_PFN' in jsonName:
siteList = ["T1_US_FNAL"]
print("Overwritting SiteWhitelist to: %s" % siteList)
elif 'LHE' in jsonName or 'DQMHarvest' in jsonName:
siteList = ["T2_CH_CERN"]
print("Overwritting SiteWhitelist to: %s" % siteList)
return siteList | 8f8b11739a30b4338b8dd31afb6c3c57545af6d0 | 9,309 |
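# Illustrative calls with hypothetical template names; the LHE_PFN rule takes
# precedence over the generic LHE/DQMHarvest rule.
print(makeSiteWhitelist("LHE_PFN_step0.json", ["T2_DE_DESY"]))    # ['T1_US_FNAL']
print(makeSiteWhitelist("DQMHarvest_task.json", ["T2_DE_DESY"]))  # ['T2_CH_CERN']
print(makeSiteWhitelist("MonteCarlo_LO.json", ["T2_DE_DESY"]))    # ['T2_DE_DESY'] (unchanged)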
import json
import jsonschema
def loadConfig(configFilePath: str) -> dict:
"""Loads configuration"""
config = {}
with open(configFilePath) as configFile:
config = json.load(configFile)
configSchema = {}
with open(CONFIG_SCHEMA_FILE_PATH, "r") as configSchemaFile:
configSchema = json.load(configSchemaFile)
jsonschema.validate(instance=config, schema=configSchema)
return config | d5e1cbd3bc1f61d329f26a40d9dff5b14ca76f22 | 9,310 |
def version_info():
"""
Get version of vakt package as tuple
"""
return tuple(map(int, __version__.split('.'))) | 446a637134484e835f522f2f67c19110796f503d | 9,311 |
import logging

import six

# NOTE: `flags`, `win32security`, `pywintypes`, `HAS_WIN32`, `get_sid`, `get_name`,
# `SaltInvocationError` and `CommandExecutionError` are provided elsewhere in salt
# (salt.utils.win_dacl / salt.exceptions); they are not defined in this snippet.
log = logging.getLogger(__name__)
def dacl(obj_name=None, obj_type="file"):
"""
Helper function for instantiating a Dacl class.
Args:
obj_name (str):
The full path to the object. If None, a blank DACL will be created.
Default is None.
obj_type (str):
The type of object. Default is 'File'
Returns:
object: An instantiated Dacl object
"""
if not HAS_WIN32:
return
class Dacl(flags(False)):
"""
DACL Object
"""
def __init__(self, obj_name=None, obj_type="file"):
"""
Either load the DACL from the passed object or create an empty DACL.
If `obj_name` is not passed, an empty DACL is created.
Args:
obj_name (str):
The full path to the object. If None, a blank DACL will be
created
obj_type (Optional[str]):
The type of object.
Returns:
obj: A DACL object
Usage:
.. code-block:: python
# Create an Empty DACL
dacl = Dacl(obj_type=obj_type)
# Load the DACL of the named object
dacl = Dacl(obj_name, obj_type)
"""
# Validate obj_type
if obj_type.lower() not in self.obj_type:
raise SaltInvocationError(
'Invalid "obj_type" passed: {0}'.format(obj_type)
)
self.dacl_type = obj_type.lower()
if obj_name is None:
self.dacl = win32security.ACL()
else:
if "registry" in self.dacl_type:
obj_name = self.get_reg_name(obj_name)
try:
sd = win32security.GetNamedSecurityInfo(
obj_name, self.obj_type[self.dacl_type], self.element["dacl"]
)
except pywintypes.error as exc:
if "The system cannot find" in exc.strerror:
msg = "System cannot find {0}".format(obj_name)
log.exception(msg)
raise CommandExecutionError(msg)
raise
self.dacl = sd.GetSecurityDescriptorDacl()
if self.dacl is None:
self.dacl = win32security.ACL()
def get_reg_name(self, obj_name):
"""
Take the obj_name and convert the hive to a valid registry hive.
Args:
obj_name (str):
The full path to the registry key including the hive, eg:
``HKLM\\SOFTWARE\\salt``. Valid options for the hive are:
- HKEY_LOCAL_MACHINE
- MACHINE
- HKLM
- HKEY_USERS
- USERS
- HKU
- HKEY_CURRENT_USER
- CURRENT_USER
- HKCU
- HKEY_CLASSES_ROOT
- CLASSES_ROOT
- HKCR
Returns:
str:
The full path to the registry key in the format expected by
the Windows API
Usage:
.. code-block:: python
import salt.utils.win_dacl
dacl = salt.utils.win_dacl.Dacl()
valid_key = dacl.get_reg_name('HKLM\\SOFTWARE\\salt')
# Returns: MACHINE\\SOFTWARE\\salt
"""
# Make sure the hive is correct
# Should be MACHINE, USERS, CURRENT_USER, or CLASSES_ROOT
hives = {
# MACHINE
"HKEY_LOCAL_MACHINE": "MACHINE",
"MACHINE": "MACHINE",
"HKLM": "MACHINE",
# USERS
"HKEY_USERS": "USERS",
"USERS": "USERS",
"HKU": "USERS",
# CURRENT_USER
"HKEY_CURRENT_USER": "CURRENT_USER",
"CURRENT_USER": "CURRENT_USER",
"HKCU": "CURRENT_USER",
# CLASSES ROOT
"HKEY_CLASSES_ROOT": "CLASSES_ROOT",
"CLASSES_ROOT": "CLASSES_ROOT",
"HKCR": "CLASSES_ROOT",
}
reg = obj_name.split("\\")
passed_hive = reg.pop(0)
try:
valid_hive = hives[passed_hive.upper()]
except KeyError:
log.exception("Invalid Registry Hive: %s", passed_hive)
raise CommandExecutionError(
"Invalid Registry Hive: {0}".format(passed_hive)
)
reg.insert(0, valid_hive)
return r"\\".join(reg)
def add_ace(self, principal, access_mode, permissions, applies_to):
"""
Add an ACE to the DACL
Args:
principal (str):
The sid of the user/group to for the ACE
access_mode (str):
Determines the type of ACE to add. Must be either ``grant``
or ``deny``.
permissions (str, list):
The type of permissions to grant/deny the user. Can be one
of the basic permissions, or a list of advanced permissions.
applies_to (str):
The objects to which these permissions will apply. Not all
these options apply to all object types.
Returns:
bool: True if successful, otherwise False
Usage:
.. code-block:: python
dacl = Dacl(obj_type=obj_type)
dacl.add_ace(sid, access_mode, permission, applies_to)
dacl.save(obj_name, protected)
"""
sid = get_sid(principal)
if self.dacl is None:
raise SaltInvocationError("You must load the DACL before adding an ACE")
# Get the permission flag
perm_flag = 0
if isinstance(permissions, six.string_types):
try:
perm_flag = self.ace_perms[self.dacl_type]["basic"][permissions]
except KeyError as exc:
msg = "Invalid permission specified: {0}".format(permissions)
log.exception(msg)
raise CommandExecutionError(msg, exc)
else:
try:
for perm in permissions:
perm_flag |= self.ace_perms[self.dacl_type]["advanced"][perm]
except KeyError as exc:
msg = "Invalid permission specified: {0}".format(perm)
log.exception(msg)
raise CommandExecutionError(msg, exc)
if access_mode.lower() not in ["grant", "deny"]:
raise SaltInvocationError(
"Invalid Access Mode: {0}".format(access_mode)
)
# Add ACE to the DACL
# Grant or Deny
try:
if access_mode.lower() == "grant":
self.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS,
# Some types don't support propagation
# May need to use 0x0000 instead of None
self.ace_prop.get(self.dacl_type, {}).get(applies_to),
perm_flag,
sid,
)
elif access_mode.lower() == "deny":
self.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS,
self.ace_prop.get(self.dacl_type, {}).get(applies_to),
perm_flag,
sid,
)
else:
log.exception("Invalid access mode: %s", access_mode)
raise SaltInvocationError(
"Invalid access mode: {0}".format(access_mode)
)
except Exception as exc: # pylint: disable=broad-except
return False, "Error: {0}".format(exc)
return True
def order_acl(self):
"""
Put the ACEs in the ACL in the proper order. This is necessary
because the add_ace function puts ACEs at the end of the list
without regard for order. This will cause the following Windows
Security dialog to appear when viewing the security for the object:
``The permissions on Directory are incorrectly ordered, which may
cause some entries to be ineffective.``
.. note:: Run this function after adding all your ACEs.
Proper Orders is as follows:
1. Implicit Deny
2. Inherited Deny
3. Implicit Deny Object
4. Inherited Deny Object
5. Implicit Allow
6. Inherited Allow
7. Implicit Allow Object
8. Inherited Allow Object
Usage:
.. code-block:: python
dacl = Dacl(obj_type=obj_type)
dacl.add_ace(sid, access_mode, applies_to, permission)
dacl.order_acl()
dacl.save(obj_name, protected)
"""
new_dacl = Dacl()
deny_dacl = Dacl()
deny_obj_dacl = Dacl()
allow_dacl = Dacl()
allow_obj_dacl = Dacl()
# Load Non-Inherited ACEs first
for i in range(0, self.dacl.GetAceCount()):
ace = self.dacl.GetAce(i)
if ace[0][1] & win32security.INHERITED_ACE == 0:
if ace[0][0] == win32security.ACCESS_DENIED_ACE_TYPE:
deny_dacl.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
)
elif ace[0][0] == win32security.ACCESS_DENIED_OBJECT_ACE_TYPE:
deny_obj_dacl.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
)
elif ace[0][0] == win32security.ACCESS_ALLOWED_ACE_TYPE:
allow_dacl.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
)
elif ace[0][0] == win32security.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
allow_obj_dacl.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
)
# Load Inherited ACEs last
for i in range(0, self.dacl.GetAceCount()):
ace = self.dacl.GetAce(i)
if (
ace[0][1] & win32security.INHERITED_ACE
== win32security.INHERITED_ACE
):
ace_prop = ace[0][1] ^ win32security.INHERITED_ACE
if ace[0][0] == win32security.ACCESS_DENIED_ACE_TYPE:
deny_dacl.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
)
elif ace[0][0] == win32security.ACCESS_DENIED_OBJECT_ACE_TYPE:
deny_obj_dacl.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
)
elif ace[0][0] == win32security.ACCESS_ALLOWED_ACE_TYPE:
allow_dacl.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
)
elif ace[0][0] == win32security.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
allow_obj_dacl.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
)
# Combine ACEs in the proper order
# Deny, Deny Object, Allow, Allow Object
# Deny
for i in range(0, deny_dacl.dacl.GetAceCount()):
ace = deny_dacl.dacl.GetAce(i)
new_dacl.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
)
# Deny Object
for i in range(0, deny_obj_dacl.dacl.GetAceCount()):
ace = deny_obj_dacl.dacl.GetAce(i)
new_dacl.dacl.AddAccessDeniedAceEx(
win32security.ACL_REVISION_DS,
ace[0][1] ^ win32security.INHERITED_ACE,
ace[1],
ace[2],
)
# Allow
for i in range(0, allow_dacl.dacl.GetAceCount()):
ace = allow_dacl.dacl.GetAce(i)
new_dacl.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
)
# Allow Object
for i in range(0, allow_obj_dacl.dacl.GetAceCount()):
ace = allow_obj_dacl.dacl.GetAce(i)
new_dacl.dacl.AddAccessAllowedAceEx(
win32security.ACL_REVISION_DS,
ace[0][1] ^ win32security.INHERITED_ACE,
ace[1],
ace[2],
)
# Set the new dacl
self.dacl = new_dacl.dacl
def get_ace(self, principal):
"""
Get the ACE for a specific principal.
Args:
principal (str):
The name of the user or group for which to get the ace. Can
also be a SID.
Returns:
dict: A dictionary containing the ACEs found for the principal
Usage:
.. code-block:: python
dacl = Dacl(obj_type=obj_type)
dacl.get_ace()
"""
principal = get_name(principal)
aces = self.list_aces()
# Filter for the principal
ret = {}
for inheritance in aces:
if principal in aces[inheritance]:
ret[inheritance] = {principal: aces[inheritance][principal]}
return ret
def list_aces(self):
"""
List all Entries in the dacl.
Returns:
dict: A dictionary containing the ACEs for the object
Usage:
.. code-block:: python
dacl = Dacl('C:\\Temp')
dacl.list_aces()
"""
ret = {"Inherited": {}, "Not Inherited": {}}
# loop through each ACE in the DACL
for i in range(0, self.dacl.GetAceCount()):
ace = self.dacl.GetAce(i)
# Get ACE Elements
user, a_type, a_prop, a_perms, inheritance = self._ace_to_dict(ace)
if user in ret[inheritance]:
ret[inheritance][user][a_type] = {
"applies to": a_prop,
"permissions": a_perms,
}
else:
ret[inheritance][user] = {
a_type: {"applies to": a_prop, "permissions": a_perms}
}
return ret
def _ace_to_dict(self, ace):
"""
Helper function for creating the ACE return dictionary
"""
# Get the principal from the sid (object sid)
sid = win32security.ConvertSidToStringSid(ace[2])
try:
principal = get_name(sid)
except CommandExecutionError:
principal = sid
# Get the ace type
ace_type = self.ace_type[ace[0][0]]
# Is the inherited ace flag present
inherited = ace[0][1] & win32security.INHERITED_ACE == 16
# Ace Propagation
ace_prop = "NA"
# Get the ace propagation properties
if self.dacl_type in ["file", "registry", "registry32"]:
ace_prop = ace[0][1]
# Remove the inherited ace flag and get propagation
if inherited:
ace_prop = ace[0][1] ^ win32security.INHERITED_ACE
# Lookup the propagation
try:
ace_prop = self.ace_prop[self.dacl_type][ace_prop]
except KeyError:
ace_prop = "Unknown propagation"
# Get the object type
obj_type = "registry" if self.dacl_type == "registry32" else self.dacl_type
# Get the ace permissions
# Check basic permissions first
ace_perms = self.ace_perms[obj_type]["basic"].get(ace[1], [])
# If it didn't find basic perms, check advanced permissions
if not ace_perms:
ace_perms = []
for perm in self.ace_perms[obj_type]["advanced"]:
# Don't match against the string perms
if isinstance(perm, six.string_types):
continue
if ace[1] & perm == perm:
ace_perms.append(self.ace_perms[obj_type]["advanced"][perm])
ace_perms.sort()
# If still nothing, it must be undefined
if not ace_perms:
ace_perms = ["Undefined Permission: {0}".format(ace[1])]
return (
principal,
ace_type,
ace_prop,
ace_perms,
"Inherited" if inherited else "Not Inherited",
)
def rm_ace(self, principal, ace_type="all"):
"""
Remove a specific ACE from the DACL.
Args:
principal (str):
The user whose ACE to remove. Can be the user name or a SID.
ace_type (str):
The type of ACE to remove. If not specified, all ACEs will
be removed. Default is 'all'. Valid options are:
- 'grant'
- 'deny'
- 'all'
Returns:
list: List of removed aces
Usage:
.. code-block:: python
dacl = Dacl(obj_name='C:\\temp', obj_type='file')
dacl.rm_ace('Users')
dacl.save(obj_name='C:\\temp')
"""
sid = get_sid(principal)
offset = 0
ret = []
for i in range(0, self.dacl.GetAceCount()):
ace = self.dacl.GetAce(i - offset)
# Is the inherited ace flag present
inherited = ace[0][1] & win32security.INHERITED_ACE == 16
if ace[2] == sid and not inherited:
if (
self.ace_type[ace[0][0]] == ace_type.lower()
or ace_type == "all"
):
self.dacl.DeleteAce(i - offset)
ret.append(self._ace_to_dict(ace))
offset += 1
if not ret:
ret = ["ACE not found for {0}".format(principal)]
return ret
def save(self, obj_name, protected=None):
"""
Save the DACL
Args:
obj_name (str):
The object for which to set permissions. This can be the
path to a file or folder, a registry key, printer, etc. For
more information about how to format the name see:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx
protected (Optional[bool]):
True will disable inheritance for the object. False will
enable inheritance. None will make no change. Default is
``None``.
Returns:
bool: True if successful, Otherwise raises an exception
Usage:
.. code-block:: python
dacl = Dacl(obj_type='file')
dacl.save('C:\\Temp', True)
"""
sec_info = self.element["dacl"]
if protected is not None:
if protected:
sec_info = sec_info | self.inheritance["protected"]
else:
sec_info = sec_info | self.inheritance["unprotected"]
if self.dacl_type in ["registry", "registry32"]:
obj_name = self.get_reg_name(obj_name)
try:
win32security.SetNamedSecurityInfo(
obj_name,
self.obj_type[self.dacl_type],
sec_info,
None,
None,
self.dacl,
None,
)
except pywintypes.error as exc:
raise CommandExecutionError(
"Failed to set permissions: {0}".format(obj_name), exc.strerror
)
return True
return Dacl(obj_name, obj_type) | 8427940cd180eb61a1ba52b4d9459466cda26ca6 | 9,312 |
from typing import List
def max_crossing_sum(lst: List[int], mid: int, n: int) -> int:
"""
Parameter <mid> is the floor middle index of <lst>.
Parameter <n> is the length of the input list <lst>.
Pre: <lst> is a list of integers and len(lst) >= 2.
Post: returns the maximum contiguous crossing sum starting from the middle of <lst>.
>>> max_crossing_sum([2, -5, 8, -6, 10, -2], 3, 6)
12
"""
left_sum, right_sum, total = 0, 0, 0 # initialize values
# max sum of the left half
k = mid - 1
i = 0
while i < mid:
total += lst[k - i]
i += 1
if total > left_sum:
left_sum = total
# # max sum the left half
# for i in range(mid - 1, -1, -1): # iterate from index mid - 1...0 backward
# total += lst[i]
# if total > left_sum:
# left_sum = total
total = 0
# max sum the right half
for i in range(mid, n): # iterate from index mid...n - 1
total += lst[i]
if total > right_sum:
right_sum = total
# note: left_sum and right_sum are each at least zero
return left_sum + right_sum | 3d873907cb7ed0c14152ec3c2e92a742bd52aa85 | 9,313 |
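# A sketch (not part of the original exercise) of how max_crossing_sum typically
# plugs into the divide-and-conquer maximum-subarray algorithm; max_subarray_sum
# below is an assumed companion function.
def max_subarray_sum(lst):
    """Return the maximum contiguous subarray sum of <lst>, floored at 0 (empty subarray allowed)."""
    n = len(lst)
    if n == 1:
        return max(lst[0], 0)
    mid = n // 2
    left = max_subarray_sum(lst[:mid])        # best sum entirely inside the left half
    right = max_subarray_sum(lst[mid:])       # best sum entirely inside the right half
    crossing = max_crossing_sum(lst, mid, n)  # best sum straddling the middle
    return max(left, right, crossing)

# max_subarray_sum([2, -5, 8, -6, 10, -2]) -> 12 (the slice [8, -6, 10])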
def _kubeconfig_impl(repository_ctx):
"""Find local kubernetes certificates"""
# find and symlink kubectl
kubectl = repository_ctx.which("kubectl")
if not kubectl:
fail("Unable to find kubectl executable. PATH=%s" % repository_ctx.path)
repository_ctx.symlink(kubectl, "kubectl")
# TODO: figure out how to use BUILD_USER
if "USER" in repository_ctx.os.environ:
user = repository_ctx.os.environ["USER"]
else:
exec_result = repository_ctx.execute(["whoami"])
if exec_result.return_code != 0:
fail("Error detecting current user")
user = exec_result.stdout.rstrip()
token = None
ca_crt = None
kubecert_cert = None
kubecert_key = None
server = repository_ctx.attr.server
# check service account first
serviceaccount = repository_ctx.path("/var/run/secrets/kubernetes.io/serviceaccount")
if serviceaccount.exists:
ca_crt = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
token_file = serviceaccount.get_child("token")
if token_file.exists:
exec_result = repository_ctx.execute(["cat", token_file.realpath])
if exec_result.return_code != 0:
fail("Error reading user token")
token = exec_result.stdout.rstrip()
        # use master url from the environment
if "KUBERNETES_SERVICE_HOST" in repository_ctx.os.environ:
server = "https://%s:%s" % (
repository_ctx.os.environ["KUBERNETES_SERVICE_HOST"],
repository_ctx.os.environ["KUBERNETES_SERVICE_PORT"],
)
else:
# fall back to the default
server = "https://kubernetes.default"
else:
home = repository_ctx.path(repository_ctx.os.environ["HOME"])
certs = home.get_child(".kube").get_child("certs")
ca_crt = certs.get_child("ca.crt").realpath
kubecert_cert = certs.get_child("kubecert.cert")
kubecert_key = certs.get_child("kubecert.key")
# config set-cluster {cluster} \
# --certificate-authority=... \
# --server=https://dev3.k8s.tubemogul.info:443 \
# --embed-certs",
_kubectl_config(repository_ctx, [
"set-cluster",
repository_ctx.attr.cluster,
"--server",
server,
"--certificate-authority",
ca_crt,
])
# config set-credentials {user} --token=...",
if token:
_kubectl_config(repository_ctx, [
"set-credentials",
user,
"--token",
token,
])
# config set-credentials {user} --client-certificate=... --embed-certs",
if kubecert_cert and kubecert_cert.exists:
_kubectl_config(repository_ctx, [
"set-credentials",
user,
"--client-certificate",
kubecert_cert.realpath,
])
# config set-credentials {user} --client-key=... --embed-certs",
if kubecert_key and kubecert_key.exists:
_kubectl_config(repository_ctx, [
"set-credentials",
user,
"--client-key",
kubecert_key.realpath,
])
# export repostory contents
repository_ctx.file("BUILD", """exports_files(["kubeconfig", "kubectl"])""", False)
return {
"cluster": repository_ctx.attr.cluster,
"server": repository_ctx.attr.server,
} | 5638af9fd059593b228aab5e6c4eca092759ce31 | 9,314 |
def getPrimaryHostIp():
"""
Tries to figure out the primary (the one with default route), local
IPv4 address.
Returns the IP address on success and otherwise '127.0.0.1'.
"""
#
# This isn't quite as easy as one would think. Doing a UDP connect to
# 255.255.255.255 turns out to be problematic on solaris with more than one
    # network interface (the IP seems to be selected at random), as well as linux
# where we've seen 127.0.1.1 being returned on some hosts.
#
# So a modified algorithm first try a known public IP address, ASSUMING
# that the primary interface is the one that gets us onto the internet.
# If that fails, due to routing or whatever, we try 255.255.255.255 and
# then finally hostname resolution.
#
sHostIp = getPrimaryHostIpByUdp('8.8.8.8');
if sHostIp.startswith('127.'):
sHostIp = getPrimaryHostIpByUdp('255.255.255.255');
if sHostIp.startswith('127.'):
sHostIp = getPrimaryHostIpByHostname();
return sHostIp; | 127eeb80c21f766c3b877fc6fdfc05aed9bf50ca | 9,315 |
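# A sketch of the UDP-connect trick described in the comments above; the real
# getPrimaryHostIpByUdp/getPrimaryHostIpByHostname helpers live elsewhere in this
# test suite, so this only illustrates the idea and is not their actual code.
import socket;

def sketchPrimaryHostIpByUdp(sPeerIp):
    """ Returns the local IP used to route towards sPeerIp (UDP connect sends no packets). """
    try:
        oSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
        oSocket.connect((sPeerIp, 1984));   # any port works; connect() only selects a route
        sHostIp = oSocket.getsockname()[0];
        oSocket.close();
    except:
        sHostIp = '127.0.0.1';
    return sHostIp;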
import botocore.exceptions
from logging import DEBUG, INFO
def run(raw_args):
"""
Parse arguments in parameter. Then call the function registered in the
argument parser which matches them.
:param raw_args:
:return:
"""
if "--version" in raw_args:
print("version: ", __version__)
return error.ReturnCode.success.value
parser = build_cli_interface()
    args = parser.parse_args(raw_args)
if args.v:
logger.set_global_level(INFO)
if args.vv:
logger.set_global_level(DEBUG)
if args.quiet:
logger.disable_logs()
if "func" in args:
try:
args.func(args)
except error.ConfigError as e:
logger.LOGGER.error(e)
return error.ReturnCode.config_error.value
except error.ArtefactError as e:
logger.LOGGER.error(e)
return error.ReturnCode.artefact_error.value
except error.ExpressionError as e:
logger.LOGGER.error(e)
return error.ReturnCode.expression_error.value
except IOError as e:
logger.LOGGER.error(e)
return error.ReturnCode.artefact_error.value
except botocore.exceptions.ClientError as e:
logger.LOGGER.error("S3 error: %s" % e)
return error.ReturnCode.s3_error.value
except KeyboardInterrupt:
logger.LOGGER.info("Interrupted")
return error.ReturnCode.success.value | 85c5a8a6ff87e8bee670627f8ce0c16ebb44b083 | 9,316 |
def localize(_bot, _msg, *args, _server=None, _channel=None, **kwargs):
""" Localize message to current personality, if it supports it. """
    global messages_
# Find personality and check if personality has an alternative for message.
personality = config.get('personality', _server or _current_server, _channel or _current_channel)
if personality and personality in messages_ and _msg in messages_[personality]:
# Replace message.
_msg = messages_[personality][_msg]
kw = _bot.FORMAT_CODES.copy()
kw.update(kwargs)
return _msg.format(*args, **kw) | ba2300388afee37d4bf40dc2ac9fc6f4f04731fa | 9,317 |
import argparse
def parse_arguments():
"""
Parse the argument list and return the location of a geometry file, the
location of a data file, whether or not to save images with a timestamp of
the four default plot windows and the VisIt session file in the current
directory, and whether or not to open the session file in VisIt.
Input:
______
none
Returns:
________
args: Namespace
User supplied geometry file location, data file location, and
indication if the user wants images of the plot windows with a
timestamp and the session file saved and opened in VisIt.
"""
parser = argparse.ArgumentParser(description="Create default VisIt output.")
parser.add_argument("geofile",
type=str,
help="Provide a path to the geometry file."
)
parser.add_argument("datafile",
type=str,
help="Provide a path to the data file."
)
parser.add_argument("-i", "--images",
action="store_true",
help="Indicate whether to save images of plot windows."
)
parser.add_argument("-t", "--timestamp",
action="store_true",
help="Indicate whether to remove the timestamp from images."
)
parser.add_argument("-s", "--sessionfile",
action="store_true",
help="Indicate whether to save the VisIt session file."
)
parser.add_argument("-v", "--openvisit",
action="store_false",
help="Indicate whether to open the session file in VisIt."
)
args = parser.parse_args()
return args | 5ba0e6e65801cfc93cc2864368eb2fac4b75e840 | 9,318 |
def list_events():
"""Show a view with past and future events."""
if "username" not in session:
return redirect("/")
events = actions.get_upcoming_events()
past_events = actions.get_past_events()
return render_template("events.html", count=len(events), past_count=len(past_events),
events=events, past_events=past_events, events_view=True, mode="3") | a4ab3207943ccd302aab6a0785de4cc4a4609994 | 9,319 |
import argparse
import os
def create_parser():
"""Create argparser."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--mode', default='local', choices=['local', 'docker'])
parser.add_argument(
'--env-file', action="append", help='Job specific environment file')
parser.add_argument(
'--image-family',
help='The image family from which to fetch the latest image')
parser.add_argument(
'--image-project',
help='The image project from which to fetch the test images')
parser.add_argument(
'--aws', action='store_true', help='E2E job runs in aws')
parser.add_argument(
'--aws-ssh',
default=os.environ.get('JENKINS_AWS_SSH_PRIVATE_KEY_FILE'),
help='Path to private aws ssh keys')
parser.add_argument(
'--aws-pub',
default=os.environ.get('JENKINS_AWS_SSH_PUBLIC_KEY_FILE'),
help='Path to pub aws ssh key')
parser.add_argument(
'--aws-cred',
default=os.environ.get('JENKINS_AWS_CREDENTIALS_FILE'),
help='Path to aws credential file')
parser.add_argument(
'--gce-ssh',
default=os.environ.get('JENKINS_GCE_SSH_PRIVATE_KEY_FILE'),
help='Path to .ssh/google_compute_engine keys')
parser.add_argument(
'--gce-pub',
default=os.environ.get('JENKINS_GCE_SSH_PUBLIC_KEY_FILE'),
help='Path to pub gce ssh key')
parser.add_argument(
'--service-account',
default=os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'),
help='Path to service-account.json')
parser.add_argument(
'--mount-paths',
action='append',
help='Paths that should be mounted within the docker container in the form local:remote')
parser.add_argument(
'--build', nargs='?', default=None, const='',
help='Build kubernetes binaries if set, optionally specifying strategy')
parser.add_argument(
'--cluster', default='bootstrap-e2e', help='Name of the cluster')
parser.add_argument(
'--docker-in-docker', action='store_true', help='Enable run docker within docker')
parser.add_argument(
'--kubeadm', choices=['ci', 'periodic', 'pull'])
parser.add_argument(
'--tag', default='v20170707-6440bde9', help='Use a specific kubekins-e2e tag if set')
parser.add_argument(
'--test', default='true', help='If we need to run any actual test within kubetest')
parser.add_argument(
'--down', default='true', help='If we need to tear down the e2e cluster')
parser.add_argument(
'--up', default='true', help='If we need to bring up a e2e cluster')
parser.add_argument(
'--kubetest_args',
action='append',
default=[],
help='Send unrecognized args directly to kubetest')
return parser | 8064afedf3273e21b69c130d8ba48852490cb6af | 9,320 |
def get_duration(df):
"""Get duration of ECG recording
Args:
df (DataFrame): DataFrame with time/voltage data
Returns:
float: duration of ECG recording
"""
start = df.time.iloc[0]
end = df.time.iloc[-1]
duration = end - start
return duration | 77698afc8ef7af557628d5fea760dc101c3e6112 | 9,321 |
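# An illustrative call with made-up data: a 10 s recording sampled every 0.5 s.
import pandas as pd

df = pd.DataFrame({"time": [i * 0.5 for i in range(21)], "voltage": [0.0] * 21})
print(get_duration(df))  # 10.0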
def conv_seq_labels(xds, xhs):
"""description and hedlines are converted to padded input vectors. headlines are one-hot to label"""
batch_size = len(xhs)
assert len(xds) == batch_size
def process_xdxh(xd,xh):
concated_xd = xd+[[3]]+xh
padded_xd = lpadd(concated_xd,maxlend)
concated_xdxh = concat_output(padded_xd)
return vocab_fold_list(concated_xdxh)
x_raw = [process_xdxh(xd,xh) for xd,xh in zip(xds,xhs)] # the input does not have 2nd eos
x = np.asarray([sequence.pad_sequences(_x, maxlen=maxlen, value=empty, padding='post', truncating='post') for _x in x_raw])
#x = flip_headline(x, nflips=nflips, model=model, debug=debug)
def padeod_xh(xh):
if [2] in xh:
return xh+[[0]]
else:
return xh+[[2]]
y = np.zeros((batch_size, maxhighs+1, maxlenh, vocab_size))
xhs_fold = [vocab_fold_list(padeod_xh(xh)) for xh in xhs]
def process_xh(xh):
if sum(xh)>0:
xh_pad = xh + [eos] + [empty]*maxlenh # output does have a eos at end
else:
xh_pad = xh + [empty]*maxlenh
xh_truncated = xh_pad[:maxlenh]
return np_utils.to_categorical(xh_truncated, vocab_size)
for i, xh in enumerate(xhs_fold):
y[i,:,:,:] = np.asarray([process_xh(xh) for xh in xhs_fold[i]])
return x, y.reshape((batch_size,(maxhighs+1)*maxlenh,vocab_size)) | e8a70797ae1fa7eaf50c96bd072614aff6417b80 | 9,322 |
import json
def create_task():
"""Create new post"""
global post_id_counter
body = json.loads(request.data)
title = body.get("title")
link = body.get("link")
username = body.get("username")
if not title or not link or not username:
return json.dumps({"error": "Missing fields in the body"}), 400
post = {
"id": post_id_counter,
"upvotes": 1,
"title": title,
"link": link,
"username": username,
"comments": {}
}
posts[post_id_counter] = post
post_id_counter += 1
return json.dumps(post), 201 | bace1881a104e41d83842992fc7818f2c2a213ac | 9,323 |
def _asklong(*args):
"""_asklong(sval_t value, char format, v(...) ?) -> int"""
return _idaapi._asklong(*args) | f80d4db85461cd3e13de2cfc6006385419729bec | 9,324 |
def describe_bivariate(data:pd.DataFrame,
only_dependent:bool = False,
size_max_sample:int = None,
is_remove_outliers:bool = True,
alpha:float = 0.05,
max_num_rows:int = 5000,
max_size_cats:int = 5,
verbose:bool = False)->pd.DataFrame:
"""
Describe bivariate relationships.
df -- data to be analized.
only_dependent -- only display relationships with dependeces (default, False).
size_max_sample -- maximum sample size to apply analysis with whole sample. If this value
is not None are used random subsamples although it will not remove bivariate
outliers (default, None).
is_remove_outliers -- Remove or not univariate outliers (default, True).
alpha -- significance level (default, 0.05).
max_num_rows -- maximum number of rows allowed without considering a sample (default, 5000).
max_size_cats -- maximum number of possible values in a categorical variable to be allowed (default, 5).
return -- results in a table.
"""
# data preparation
df = preparation(data, max_num_rows, max_size_cats, verbose = True)
# relationship num - num
dfnn = analysis_num_num(df, only_dependent = only_dependent, size_max_sample = size_max_sample,
is_remove_outliers = is_remove_outliers, alpha = alpha, verbose = verbose)
# relationship cat - cat
dfcc = analysis_cat_cat(df, only_dependent = only_dependent, alpha = alpha, verbose = verbose)
# relationship cat - num
dfcn = analysis_cat_num(df, only_dependent = only_dependent, alpha = alpha,
is_remove_outliers = is_remove_outliers, verbose = verbose)
    # append results (pd.concat replaces DataFrame.append, which was removed in pandas 2.0)
    dfbiv = pd.concat([dfnn, dfcc, dfcn])
    # return
return dfbiv | 4754b106cab60dd02ab32b0705802d9459c28593 | 9,325 |
import click
import subprocess
def launch(cmd, args=None, separate_terminal=False, in_color='cyan', silent=False, should_wait=True):
"""
Launch a system command
:param cmd: The command to run
:param args: The arguments to pass to that command (a str list)
:param separate_terminal: Should we open a new terminal window
:param in_color: The color to output
:param silent: Echo the system command to the current stdout?
:param should_wait: In the case of a separate terminal, should we wait for that to finish?
:return: The error code returned from the command. If not wait to complete, this will only return 0.
"""
if args is None:
args = []
args_in = [cmd]
if separate_terminal or not should_wait:
pre_args = ['start']
if should_wait:
pre_args.append('/wait')
pre_args.append(cmd)
pre_args.extend(args)
args_in = pre_args
else:
args_in.extend(args)
if not silent:
click.secho(' '.join(args_in), fg=in_color)
return subprocess.call(args_in, shell=separate_terminal or not should_wait) | 48de0ef8b80973fede05444ec78ab09de6b783b9 | 9,326 |
def devilry_multiple_examiners_short_displayname(assignment, examiners, devilryrole):
"""
Returns the examiners wrapped in HTML formatting tags perfect for showing
the examiners inline in a non-verbose manner.
Typically used for showing all the examiners in an
:class:`devilry.apps.core.models_group.AssignmentGroup`.
Handles anonymization based on ``assignment.anonymizationmode`` and ``devilryrole``.
Args:
assignment: A :class:`devilry.apps.core.models.Assignment` object.
The ``assignment`` should be the assignment where the examiners belongs.
examiners: An iterable of :class:`devilry.apps.core.models.Examiner` objects.
devilryrole: See
:meth:`devilry.apps.core.models.Assignment.examiners_must_be_anonymized_for_devilryrole`.
"""
return {
'assignment': assignment,
'examiners': examiners,
'devilryrole': devilryrole,
} | 4afa278f115a2a99ee2f922ef15dd8507293d3cc | 9,327 |
import numpy as np
import seaborn
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb, hex2color
def colormap_with_fixed_hue(color, N=10):
"""Create a linear colormap with fixed hue
Parameters
----------
color: tuple
color that determines the hue
N: int, optional
number of colors used in the palette
"""
color_hsv = rgb_to_hsv(hex2color(color))
    base = seaborn.color_palette("Blues", N)
base_hsv = np.array(list(map(rgb_to_hsv, base)))
h, s, v = base_hsv.T
h_fixed = np.ones_like(h) * color_hsv[0]
color_array = np.array(list(map(
hsv_to_rgb, np.vstack([h_fixed, s * color_hsv[1], v]).T)))
return LinearSegmentedColormap.from_list("mycmap", color_array) | 2d6e7d1bc5f919a01bf9871ed9422cd847cc5a99 | 9,328 |
import json
def get_news_blacklist() -> list:
"""Get the users news blacklist from news-blacklist.json.
Returns:
list: List of blacklisted news article titles
"""
try:
with open("news-blacklist.json", encoding="utf-8") as file:
log.info("Getting news blacklist from news-blacklist.json")
user_blacklist = json.load(file)
except FileNotFoundError:
log.warning("No news-blacklist.json found, creating a new one")
user_blacklist = {"blacklist": []}
with open("news-blacklist.json", "w", encoding="utf-8") as file:
json.dump(user_blacklist, file)
return user_blacklist["blacklist"] | b25f2c619e5767d8238e95277e691264eb0682df | 9,329 |
def calc_triangular_number(n: int):
"""
A triangular number or triangle number counts objects
arranged in an equilateral triangle.
More info: https://www.mathsisfun.com/algebra/triangular-numbers.html
:param n:
:return:
"""
return int((n * (n + 1)) / 2) | e3bfefd6e0e9451849cee8f6da252ec128285c85 | 9,330 |
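# A quick check of the closed form T_n = n * (n + 1) / 2 against a brute-force sum
# (illustrative only):
assert calc_triangular_number(5) == sum(range(1, 6)) == 15
assert calc_triangular_number(100) == sum(range(1, 101)) == 5050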
from functools import wraps


def wrap_keepdims(func):
""" Check that output have same dimensions as input. """
# TODO : check if it's working
@wraps(func)
def check_keepdims(X, *args, keepdims=False, **kwargs):
if keepdims:
out = func(X, *args, **kwargs)
return out.reshape(out.shape + (1,))
return func(X, *args, **kwargs)
return check_keepdims | ef0d7a320e9207f50b1c00d9b9359faad47e5850 | 9,331 |
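# A usage sketch for wrap_keepdims with a hypothetical reducer: the decorator
# re-adds a trailing axis when the caller requests keepdims=True.
import numpy as np

@wrap_keepdims
def row_means(X):
    return X.mean(axis=1)

X = np.arange(6.0).reshape(2, 3)
print(row_means(X).shape)                 # (2,)
print(row_means(X, keepdims=True).shape)  # (2, 1)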
def get_headers(cred=None, filename=None):
"""Return headers for basic HTTP authentication.
Returns:
str: Basic authorization header, including Base64 encoded
username and password.
"""
return {
"Authorization": "Basic {}".format(
get_base64(cred=cred, filename=filename, api="reporting")
)
} | 17a8c941044487a334070d70d9d93071898a31f5 | 9,332 |
def create_xml_content(
segmentation: list[dict],
lang_text: list[str],
split: str,
src_lang: str,
tgt_lang: str,
is_src: bool,
) -> list[str]:
"""
Args:
segmentation (list): content of the yaml file
lang_text (list): content of the transcription or translation txt file
split (str): the split name
src_lang (str): source language id
tgt_lang (str): target language id
is_src (bool): whether lang_text is transcriptions
Returns:
xml_content (list)
"""
xml_content = []
xml_content.append('<?xml version="1.0" encoding="UTF-8"?>')
xml_content.append("<mteval>")
if is_src:
xml_content.append(f'<srcset setid="{split}" srclang="{src_lang}">')
else:
xml_content.append(
f'<refset setid="{split}" srclang="{src_lang}" trglang="{tgt_lang}" refid="ref">'
)
prev_talk_id = -1
for sgm, txt in zip(segmentation, lang_text):
talk_id = sgm["wav"].split(".wav")[0]
if prev_talk_id != talk_id:
if prev_talk_id != -1:
xml_content.append("</doc>")
# add content (some does not matter, but is added to replicate the required format)
xml_content.append(f'<doc docid="{talk_id}" genre="lectures">')
xml_content.append("<keywords>does, not, matter</keywords>")
xml_content.append("<speaker>Someone Someoneson</speaker>")
xml_content.append(f"<talkid>{talk_id}</talkid>")
xml_content.append("<description>Blah blah blah.</description>")
xml_content.append("<title>Title</title>")
seg_id = 0
prev_talk_id = talk_id
seg_id += 1
xml_content.append(f'<seg id="{seg_id}">{txt}</seg>')
xml_content.append("</doc>")
if is_src:
xml_content.append("</srcset>")
else:
xml_content.append("</refset>")
xml_content.append("</mteval")
return xml_content | 6af6b5fcdaccd5bd81ad202bdb22fad3910afc2b | 9,333 |
def style_string(string: str, fg=None, stylename=None, bg=None) -> str:
"""Apply styles to text.
It is able to change style (like bold, underline etc), foreground and background colors of text string."""
ascii_str = _names2ascii(fg, stylename, bg)
return "".join((
ascii_str,
string,
_style_dict["reset"])) | 6d61c33a632c88609cb551ae0a1d55d8ee836937 | 9,334 |
def select_all_genes():
"""
Select all genes from SQLite database
"""
query = """
SELECT GENE_SYMBOL, HGNC_ID, ENTREZ_GENE_ID, ENSEMBL_GENE, MIM_NUMBER FROM GENE
"""
cur = connection.cursor()
cur.execute(query)
rows = cur.fetchall()
genes = []
for row in rows:
omim = row[4].split(';') if row[4] != "None" else []
gene = Gene(gene_symbol=row[0], hgnc_id=row[1], entrez_gene_id=row[2], ensembl_gene=row[3], omim=omim)
genes.append(gene)
cur.close()
return genes | fb73e890d62f247939c1aa9a1e16a8e5f5a75866 | 9,335 |
def test_enum_handler(params):
""" 测试枚举判断验证
"""
return json_resp(params) | c3a4a9589b5d06813d6afaa55c8f6d9fafa80252 | 9,336 |
def get_staff_timetable(url, staff_name):
"""
Get Staff timetable via staff name
:param url: base url
:param staff_name: staff name string
:return: a list of dicts
"""
url = url + 'TextSpreadsheet;Staff;name;{}?template=SWSCUST+Staff+TextSpreadsheet&weeks=1-52' \
'&days=1-7&periods=1-32&Width=0&Height=0'.format(staff_name)
course_list, name = extract_text_spread_sheet(url, lambda _: False)
for course in course_list:
course['Name of Type'] = course['Module']
course['Module'] = course['Description']
return course_list, name | 0e52604c08bef70d5cfc1fc889c8ced766f49ae5 | 9,337 |
def find_ccs(unmerged):
"""
Find connected components of a list of sets.
E.g.
x = [{'a','b'}, {'a','c'}, {'d'}]
    find_ccs(x)
[{'a','b','c'}, {'d'}]
"""
    merged = set()
    while unmerged:
        elem = unmerged.pop()
        # absorb every existing component that overlaps <elem>, so chained
        # overlaps collapse into a single connected component
        for s in merged.copy():
            if not elem.isdisjoint(s):
                merged.remove(s)
                elem = elem.union(s)
        merged.add(frozenset(elem))
return [list(x) for x in merged] | 4bff4cc32237dacac7737ff509b4a68143a03914 | 9,338 |
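# A small usage sketch showing that find_ccs merges transitively: sets with no
# direct overlap still join through a chain of shared elements.
groups = find_ccs([{1, 2}, {2, 3}, {3, 4}, {9}])
# -> two components, e.g. [[1, 2, 3, 4], [9]] (ordering is unspecified)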
def read_match_df(_url: str, matches_in_section: int=None) -> pd.DataFrame:
"""各グループの試合リスト情報を自分たちのDataFrame形式で返す
JFA形式のJSONは、1試合の情報が下記のような内容
{'matchTypeName': '第1節',
'matchNumber': '1', # どうやら、Competitionで通しの番号
'matchDate': '2021/07/22', # 未使用
'matchDateJpn': '2021/07/22',
'matchDateWeek': '木', # 未使用
'matchTime': '20:00', # 未使用
'matchTimeJpn': '20:00',
'venue': '東京スタジアム',
'venueFullName': '東京/東京スタジアム', # 未使用
'homeTeamName': '日本',
'homeTeamQualificationDescription': '', # 未使用
'awayTeamName': '南アフリカ',
'awayTeamQualificationDescription': '', # 未使用
'score': {
'homeWinFlag': False, # 未使用
'awayWinFlag': False, # 未使用
'homeScore': '',
'awayScore': '',
'homeTeamScore1st': '', # 未使用 前半得点
'awayTeamScore1st': '', # 未使用 前半得点
'homeTeamScore2nd': '', # 未使用 後半得点
'awayTeamScore2nd': '', # 未使用 後半得点
'exMatch': False,
'homeTeamScore1ex': '', # 未使用 延長前半得点
'awayTeamScore1ex': '', # 未使用 延長前半得点
'homeTeamScore2ex': '', # 未使用 延長後半得点
'awayTeamScore2ex': '', # 未使用 延長後半得点
'homePKScore': '', # 未使用 PK得点
'awayPKScore': '' # 未使用 PK得点
},
'scorer': {
'homeScorer': [], # 未使用
'awayScorer': [] # 未使用
},
'matchStatus': '',
'officialReportURL': '' # 未使用
}
"""
match_list = read_match_json(_url)[SCHEDULE_CONTAINER_NAME][SCHEDULE_LIST_NAME]
# print(match_list)
result_list = []
match_index_dict = {}
for (_count, _match_data) in enumerate(match_list):
_row = {}
for (target_key, org_key) in REPLACE_KEY_DICT.items():
_row[target_key] = _match_data[org_key]
for (target_key, org_key) in SCORE_DATA_KEY_LIST.items():
_row[target_key] = _match_data['score'][org_key]
_regexp_result = SECTION_NO.match(_row['section_no'])
if _regexp_result:
section_no = _regexp_result[1]
        elif matches_in_section is not None:  # no section number given; compute it from the known number of matches per section
section_no = int(_count / matches_in_section) + 1
        else:  # section number unknown
section_no = 0
_row['section_no'] = section_no
if section_no not in match_index_dict:
match_index_dict[section_no] = 1
else:
match_index_dict[section_no] += 1
_row['match_index_in_section'] = match_index_dict[section_no]
        # In the U-18 Prince Takamado Trophy Kanto Prince League, cancellation info was
        # (oddly) stored in 'venueFullName', so handle it provisionally here
if '【中止】' in _match_data['venueFullName']:
print('Cancel Game## ' + _match_data['venueFullName'])
_row['status'] = '試合中止'
else:
print('No Cancel## ' + _match_data['venueFullName'])
result_list.append(_row)
return pd.DataFrame(result_list) | 0dae5f1669c3e1a1a280967bc75780a7b1aa91a0 | 9,339 |
import re

from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
def tokenize(text):
"""Tokenise text with lemmatizer and case normalisation.
Args:
text (str): text required to be tokenized
Returns:
list: tokenised list of strings
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens | 56c7dc6ce557257f8716bd502958093eb01a8c50 | 9,340 |
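# Example run (requires the nltk 'punkt' and 'wordnet' data packages); the exact
# output can vary slightly with the installed nltk data version.
print(tokenize("Flooded roads near http://example.com are being cleared"))
# -> ['flooded', 'road', 'near', 'urlplaceholder', 'are', 'being', 'cleared']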
import tensorflow as tf


def reinforce_loss_discrete(classification_logits_t,
classification_labels_t,
locations_logits_t,
locations_labels_t,
use_punishment=False):
"""Computes REINFORCE loss for contentious discrete action spaces.
Args:
classification_logits_t: List of classification logits at each time point.
classification_labels_t: List of classification labels at each time point.
locations_logits_t: List of location logits at each time point.
locations_labels_t: List of location labels at each time point.
use_punishment: (Boolean) Reward {-1, 1} if true else {0, 1}.
Returns:
reinforce_loss: REINFORCE loss.
"""
classification_logits = tf.concat(classification_logits_t, axis=0)
classification_labels = tf.concat(classification_labels_t, axis=0)
locations_logits = tf.concat(locations_logits_t, axis=0)
locations_labels = tf.concat(locations_labels_t, axis=0)
rewards = tf.cast(
tf.equal(
tf.argmax(classification_logits, axis=1,
output_type=classification_labels.dtype),
classification_labels), dtype=tf.float32) # size (batch_size) each
if use_punishment:
# Rewards is \in {-1 and 1} instead of {0, 1}.
rewards = 2. * rewards - 1.
neg_advs = tf.stop_gradient(rewards - tf.reduce_mean(rewards))
log_prob = -tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=locations_logits, labels=locations_labels)
loss = -tf.reduce_mean(neg_advs * log_prob)
return loss | 7296f0647d792ce0698cd48d2b56e30941ca1afb | 9,341 |
import os
from collections import defaultdict
def train2(num,base_path=base_path):
"""
this function is used to process train.yzbx.txt format
"""
#train_data_file="/home/zyyang/RS/train.yzbx.txt"
train_data_file=os.path.join(base_path,num,'train.yzbx.txt')
b_data=defaultdict(list)
fi=open(train_data_file,'r')
size=0
maxb=0
for line in fi:
s=line.strip().split()
b=int(s[2])
maxb= max(b,maxb)
o=b>int(s[1])
o=int(o)
b_data[b].append(o)
size+=1
fi.close()
b_data=sorted(b_data.items(),key=lambda e:e[0],reverse=False)
b_data=dict(b_data)
bdns=[]
wins=0
for z in b_data:
wins=sum(b_data[z])
b=z
d=wins
n=size
bdn=[b,d,n]
bdns.append(bdn)
size-=len(b_data[z])
zw_dict={}
min_p_w=0
bdns_length=len(bdns)
count=0
p_l_tmp=1.0
for bdn in bdns:
count+=1
b=float(bdn[0])
d=float(bdn[1])
n=float(bdn[2])
if count<bdns_length:
p_l_tmp*=(n-d)/n
p_l=p_l_tmp
p_w=max(1.0-p_l,min_p_w)
zw_dict[int(b)]=p_w
#print(zw_dict)
return zw_dict,maxb | 5cdaed452c2087161a9ccdd7c06735b025aed0db | 9,342 |
import os
import shutil
import numpy as np
from scipy.stats import binned_statistic
from timeit import default_timer as timer  # assumed source of the `timer` used below
def analyze(binObj, task='skewer', frange=None, distort=True, CenAlpha=None,
histbin=False, statistic='mean', suffix='temp', overwrite=False,
skewer_index=None, zq_cut=[0, 5], parallel=False, tt_bins=None,
verbose=True, nboot=100, calib_kwargs=None, skewer_kwargs=None):
"""
Function to perform important operations on the binObj
Parameters:
binObj: An instance of the bin_class
task: one of ["data_points", "calibrate", "composite", "skewer"]
frange: the Lyman Alpha forest ranges used for the analysis
distort: warp the spectra to a common spectral index
CenAlpha: the common spectral index to warp to
histbin: perform histogram rebinninb
statistic: statistic to use when creating composites [task="composite"]
suffix: name of the file to write to
overwrite: overwrite the skewer in the LogLikes folder if duplicates
skewer_index: index of the skewers in the forest range (frange) to use
zq_cut: allows to perform a cut in the quasar redshift
parallel: whether to run the skewers in parallel
tt_bins: Bins in lyman alpha redshift to use for task="data_points"
calib_kwargs: additional keyword arguments for task="calibrate"
skewer_kwargs: additional keyword arguments for task="skewer"
"""
if frange is None:
frange = [1070, 1160]
lyInd = np.where((binObj.wl > frange[0]) & (binObj.wl < frange[1]))[0]
if skewer_index is None:
skewer_index = range(len(lyInd))
else:
skewer_index = np.atleast_1d(skewer_index)
outfile = task + '_' + suffix
if task == 'skewer' or task == 'data_points':
if verbose:
print('Total skewers available: {}, skewers analyzed in this '
'run: {}'.format(len(lyInd), len(skewer_index)))
myspec = binObj._flux[:, lyInd[skewer_index]]
myivar = binObj._ivar[:, lyInd[skewer_index]]
zMat = binObj._zAbs[:, lyInd[skewer_index]]
mywave = binObj.wl[lyInd[skewer_index]]
else:
myspec, myivar, zMat = binObj._flux, binObj._ivar, binObj._zAbs
mywave = binObj.wl
myz, myalpha = binObj._zq, binObj._alpha
# selecting according to quasar redshifts
zq_mask = (myz > zq_cut[0]) & (myz < zq_cut[1])
myspec = myspec[zq_mask]
myivar = myivar[zq_mask]
zMat = zMat[zq_mask]
myz, myalpha = myz[zq_mask], myalpha[zq_mask]
# B. DATA PREPROCESSING ---------------------------------------------------
if histbin:
# Histogram binning in parameter space
myp1, myp2 = binObj._par1, binObj._par2
myzbins = find_zbins(myz)
hInd = np.where((myz >= myzbins[0]) & (myz < myzbins[-1]))
# Modify the selection to choose only objects that fall in the
# zbins range
myz, myalpha = myz[hInd], myalpha[hInd]
myp1, myp2 = myp1[hInd], myp2[hInd]
myspec, myivar = myspec[hInd], myivar[hInd]
zMat = zMat[hInd]
if binObj._hWeights is None:
h_weights = hist_weights(myp1, myp2, myz, myzbins)
binObj._hWeights = h_weights
myivar = myivar * h_weights[:, None]
else:
myivar = myivar * binObj._hWeights[:, None]
if distort:
# Distort spectra in alpha space
outfile += '_distort'
if CenAlpha is None:
CenAlpha = np.median(myalpha)
distortMat = np.array([(mywave / 1450.) ** ele for
ele in (CenAlpha - myalpha)])
myspec *= distortMat
myivar /= distortMat ** 2
if verbose:
print('All spectra distorted to alpha:', CenAlpha)
# C. CALIBRATION VS ESTIMATION --------------------------------------------
if task == "data_points":
print("Make sure that the reconstructed continuum has been run using "
"the same frange as that being used right now!")
# Data points for the transmission, using a continuum as the base
if binObj.continuum is None:
raise AttributeError("Set the reconstructed continuum for the"
"bin first!!!")
ivar_mask = (myivar > 0).flatten()
zLyAs = zMat.flatten()
zLyAs = zLyAs[ivar_mask]
# bin centers for the redshift-transmission plot
if tt_bins is None:
tt_bins = np.linspace(zLyAs.min(), zLyAs.max(), 40)
tt_cens = (tt_bins[1:] + tt_bins[:-1]) / 2.
# errors from t0-gamma fluctuations
# We are not going to use this in the paper !!!
tt_binned = np.zeros((len(binObj.continuum), len(tt_cens)))
for i in range(len(binObj.continuum)):
tt = (myspec / binObj.continuum[i]).flatten()
tt = tt[ivar_mask]
tt_binned[i] = binned_statistic(zLyAs, tt, statistic=np.mean,
bins=tt_bins).statistic
continuum = binObj.continuum.mean(0)
# estimates of the transmission central values - errors obtained
# using bootstrap as below
tt_cen = (myspec / continuum).flatten()
tt_cen = tt_cen[ivar_mask]
tt_data = binned_statistic(zLyAs, tt_cen, statistic=np.mean,
bins=tt_bins).statistic
# tt_std = binned_statistic(zLyAs, tt_cen, statistic=np.std,
# bins=tt_bins).statistic
# tt_counts = binned_statistic(zLyAs, None, statistic='count',
# bins=tt_bins).statistic
# errors from bootstrapping
print("Computing bootstrap samples of transmission")
tt_boot = np.zeros((nboot, len(tt_cens)))
for i in range(nboot):
np.random.seed()
ixs = np.random.randint(0, len(myivar), len(myivar))
sp_boot, iv_boot = myspec[ixs], myivar[ixs]
zz_boot = zMat[ixs]
ivar_mask = (iv_boot > 0).flatten()
zLyAs = zz_boot.flatten()
zLyAs = zLyAs[ivar_mask]
tt = (sp_boot / continuum).flatten()
tt = tt[ivar_mask]
tt_boot[i] = binned_statistic(zLyAs, tt, statistic=np.mean,
bins=tt_bins).statistic
# Save this to a file for future use -
# Use this for the analysis of figure 6 <--
data_full = np.array([tt_cens, tt_data, *tt_boot])
np.savetxt("data_points_" + binObj.name + ".dat", data_full)
return tt_cens, tt_data, tt_binned, tt_boot # , tt_std / np.sqrt(tt_counts)
if task == 'calibrate':
ixs = (myz > 1.6) & (myz < 4)
print('Number of spectra used for calibration are: %d' % ixs.sum())
rest_range = [[1280, 1290], [1320, 1330], [1345, 1360], [1440, 1480]]
# normalization range used
obs_min, obs_max = 4600, 4640
corrections.calibrate(binObj.wl, myspec[ixs], myivar[ixs], myz[ixs],
rest_range, obs_min, obs_max, binObj.name, True)
# D. COMPOSITE CREATION IF SPECIFIED --------------------------------------
if task == 'composite':
# Create composites using the spectra
# zbins = find_zbins(myz)
zbins = np.arange(2.1, 4.5, 0.05)
# comp_simple.compcompute(myspec, myivar, myz, mywave,
# zbins, statistic, outfile)
create_comp.create_comp(myspec, myivar, myz,
mywave, zbins, outfile)
# E. LIKELIHOOD SKEWER ----------------------------------------------------
if task == 'skewer':
currDir = os.getcwd()
destDir = '../LogLikes' + '/Bin_' + outfile +\
str(frange[0]) + '_' + str(frange[1]) # <--
if not os.path.exists(destDir):
os.makedirs(destDir)
else:
if overwrite:
shutil.rmtree(destDir)
os.makedirs(destDir)
os.chdir(destDir)
start = timer()
# Do not plot graphs while in parallel
res = None
if parallel:
pass
# print('Running in parallel now!')
# myfunc_partial = partial(mcmc_skewer.mcmcSkewer, **skewer_kwargs)
# pool = Pool()
# res = pool.map(myfunc_partial,
# zip(np.array([zMat, myspec, myivar]).T, skewer_index))
# pool.close()
# pool.join()
# else:
# for j, ele in enumerate(skewer_index):
# res = mcmc_skewer.mcmcSkewer(
# [np.array([zMat[:, j], myspec[:, j], myivar[:, j]]).T, ele],
# **skewer_kwargs)
stop = timer()
print('Time elapsed:', stop - start)
os.chdir(currDir)
return mywave, res | 90a04f5c4bb137032a80217a10a9dcfae863f0a8 | 9,343 |
import itertools
import numpy as np
def distances(spike_times, ii_spike_times, epoch_length=1.0, metric='SPOTD_xcorr'):
"""Compute temporal distances based on various versions of the SPOTDis, using CPU parallelization.
Parameters
----------
spike_times : numpy.ndarray
1 dimensional matrix containing all spike times
ii_spike_times : numpy.ndarray
MxNx2 dimensional matrix containing the start and end index for the spike_times array
for any given epoch and channel combination
metric : str
Pick the specific metric by combining the metric ID with either 'xcorr' to compute it on
pairwise xcorr histograms or 'times' to compute it directly on spike times.
Currently available:
* SPOTD_xcorr
* SPOTD_xcorr_pooled
* SPOTD_spikes
Returns
-------
distances : numpy.ndarray
MxM distance matrix with numpy.nan for unknown distances
"""
n_epochs = ii_spike_times.shape[0]
epoch_index_pairs = np.array(
list(itertools.combinations(range(n_epochs), 2)),
dtype=int)
# SPOTDis comparing the pairwise xcorrs of channels
if metric == 'SPOTD_xcorr':
distances, percent_nan = xcorr_spotdis_cpu_(
spike_times, ii_spike_times, epoch_index_pairs)
distances = distances / (2*epoch_length)
# SPOTDis comparing the xcorr of a channel with all other channels pooled
elif metric == 'SPOTD_xcorr_pooled':
distances, percent_nan = xcorr_pooled_spotdis_cpu_(
spike_times, ii_spike_times, epoch_index_pairs)
distances = distances / (2*epoch_length)
# SPOTDis comparing raw spike trains
elif metric == 'SPOTD_spikes':
distances, percent_nan = spike_spotdis_cpu_(
spike_times, ii_spike_times, epoch_index_pairs)
distances = distances / epoch_length
# Otherwise, raise exception
else:
raise NotImplementedError('Metric "{}" unavailable, check doc-string for alternatives.'.format(
metric))
np.fill_diagonal(distances, 0)
return distances | 3696f33929150ac2f002aa6a78822654eeb50581 | 9,344 |
def format_object_translation(object_translation, typ):
"""
Formats the [poi/event/page]-translation as json
:param object_translation: A translation object which has a title and a permalink
:type object_translation: ~cms.models.events.event.Event or ~cms.models.pages.page.Page or ~cms.models.pois.poi.POI
:param typ: The type of this object
:type typ: str
:return: A dictionary with the title, url and type of the translation object
:rtype: dict
"""
return {
"title": object_translation.title,
"url": f"{WEBAPP_URL}/{object_translation.permalink}",
"type": typ,
} | 11499d53d72e071d59176a00543daa0e8246f89a | 9,345 |
def _FormatKeyValuePairsToLabelsMessage(labels):
"""Converts the list of (k, v) pairs into labels API message."""
sorted_labels = sorted(labels, key=lambda x: x[0] + x[1])
return [
api_utils.GetMessage().KeyValue(key=k, value=v) for k, v in sorted_labels
] | 3f2dd78951f8f696c398ab906acf790d7923eb75 | 9,346 |
def gen_unique(func):
""" Given a function returning a generator, return a function returning
a generator of unique elements"""
return lambda *args: unique(func(*args)) | 703dc6f80553fc534ca1390eb2c0c3d7d81b26eb | 9,347 |
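# Illustrative sketch: `unique` is an external helper not shown in this record;
# an order-preserving stand-in is defined here (an assumption) so the example
# runs on its own.
def unique(iterable):
    return list(dict.fromkeys(iterable))

first_mod_3 = gen_unique(lambda n: (i % 3 for i in range(n)))
print(first_mod_3(7))  # -> [0, 1, 2]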
def admin_inventory(request):
"""
View to handle stocking up inventory, adding products...
"""
context = dict(product_form=ProductForm(),
products=Product.objects.all(),
categories=Category.objects.all(),
transactions=request.user.account.transaction_set.all()
)
return render(request, 'namubufferiapp/admin_handleinventory.html', context) | ec8f38947ab95f82a26fc6c6949d569a5ec83f7d | 9,348 |
def snippet_list(request):
"""
List all code snippets, or create a new snippet.
"""
print(f'METHOD @ snippet_list= {request.method}')
if request.method == 'GET':
snippets = Snippet.objects.all()
serializer = SnippetSerializer(snippets, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = SnippetSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400) | 959245f7d194470c4bccef338ead8d0b35abe1bc | 9,349 |
from argparse import ArgumentParser
import pandas as pd
def generate_submission(args: ArgumentParser, submission: pd.DataFrame) -> pd.DataFrame:
"""Take Test Predictions for 4 classes to Generate Submission File"""
image, kind = args.shared_indices
df = submission.reset_index()[[image, args.labels[0]]]
df.columns = ["Id", "Label"]
df.set_index("Id", inplace=True)
df["Label"] = 1. - df["Label"]
print(f"\nSubmission Stats:\n{df.describe()}\nSubmission Head:\n{df.head()}")
return df | e5b3f1c65adbe1436d638667cbc7bae9fb8a6a1e | 9,350 |
import numba
import numpy as np
def nearest1d(vari, yi, yo, extrap="no"):
"""Nearest interpolation of nD data along an axis with varying coordinates
Warning
-------
`nxi` must be either a multiple or a divisor of `nxo`,
and multiple of `nxiy`.
Parameters
----------
vari: array_like(nxi, nyi)
yi: array_like(nxiy, nyi)
yo: array_like(nxo, nyo)
Return
------
array_like(nx, nyo): varo
With `nx=max(nxi, nxo)`
"""
# Shapes
nxi, nyi = vari.shape
nxiy = yi.shape[0]
nxi, nyi = vari.shape
nxo, nyo = yo.shape
nx = max(nxi, nxo)
# Init output
varo = np.full((nx, nyo), np.nan, dtype=vari.dtype)
# Loop on the varying dimension
for ix in numba.prange(nx):
# Index along x for coordinate arrays
ixi = min(nxi-1, ix % nxi)
ixiy = min(nxiy-1, ix % nxiy)
ixoy = min(nxo-1, ix % nxo)
# Loop on input grid
iyimin, iyimax = get_iminmax(yi[ixiy])
iyomin, iyomax = get_iminmax(yo[ixoy])
for iyi in range(iyimin, iyimax):
# Out of bounds
if yi[ixiy, iyi+1] < yo[ixoy, iyomin]:
continue
if yi[ixiy, iyi] > yo[ixoy, iyomax]:
break
# Loop on output grid
for iyo in range(iyomin, iyomax+1):
dy0 = yo[ixoy, iyo] - yi[ixiy, iyi]
dy1 = yi[ixiy, iyi+1] - yo[ixoy, iyo]
# Above
if dy1 < 0: # above
break
# Below
if dy0 < 0:
iyomin = iyo + 1
# Interpolations
elif dy0 <= dy1:
varo[ix, iyo] = vari[ixi, iyi]
else:
varo[ix, iyo] = vari[ixi, iyi+1]
# Extrapolation
if extrap != "no":
varo = extrap1d(varo, extrap)
return varo | f7a9c03b1cca3844a9aad3d954fa2a189134a69f | 9,351 |
def registros():
"""Records page."""
return render_template('records.html') | b72cffbdf966f8c94831da76fd901ce9cba60aac | 9,352 |
import numpy as np
def cal_evar(rss, matrix_v):
    """
    Compute the explained variance of an approximation.
    Args:
        rss: residual sum of squares of the approximation.
        matrix_v: the original data matrix being approximated.
    Returns:
        The explained variance, 1 - rss / sum(matrix_v ** 2).
    """
evar = 1 - (rss / np.sum(matrix_v ** 2))
return evar | 21f1d71ba98dafe948a5a24e4101968531ec1e30 | 9,353 |
def split_path(path):
"""
public static List<String> splitPath(String path)
* Converts a path expression into a list of keys, by splitting on period
* and unquoting the individual path elements. A path expression is usable
* with a {@link Config}, while individual path elements are usable with a
* {@link ConfigObject}.
* <p>
* See the overview documentation for {@link Config} for more detail on path
* expressions vs. keys.
*
* @param path
* a path expression
* @return the individual keys in the path
* @throws ConfigException
* if the path expression is invalid
"""
return impl_util.split_path(path) | 9e102d7f7b512331165f51e6055daeaf4f56b61a | 9,354 |
import os
from os import path
import numpy
import math
import cv2
def load_dataset_RGB(split_th = 0.8, ext='.jpg'):
""" Default: 80% for training, 20% for testing """
positive_dir = '/media/himanshu/ce640fc3-0289-402c-9150-793e07e55b8c/visapp2018code/RGB/data/positive'
negative_dir = '/media/himanshu/ce640fc3-0289-402c-9150-793e07e55b8c/visapp2018code/RGB/data/negative'
# positive_dir = '/home/himanshu/Documents/Projects/DLbasics/visapp2018code/RGB/data/positive'
# negative_dir = '/home/himanshu/Documents/Projects/DLbasics/visapp2018code/RGB/data/negative'
t_files = os.listdir(path.join(positive_dir, '1'))
total_pos_files = len(t_files)
t_files = os.listdir(path.join(negative_dir, '1'))
total_neg_files = len(t_files)
print('pos files: ',total_pos_files)
print('neg files: ',total_neg_files)
# total_files = total_pos_files + total_neg_files
total_files = 1000
X1 = numpy.zeros( (total_files,96,128,3), dtype=numpy.uint8 )
X2 = numpy.zeros( (total_files,96,128,3), dtype=numpy.uint8 )
X3 = numpy.zeros( (total_files,96,128,3), dtype=numpy.uint8 )
y = numpy.zeros( (total_files), dtype=numpy.uint8 )
pos_file_counter = 0
neg_file_counter = 0
total_counter = 0
while total_counter < total_files:
show_progress(max_val=total_files, present_val=total_counter)
if total_counter % 2 == 0: # case: positive
im1_path = path.join(positive_dir, '1', str(pos_file_counter+1)+ext)
im2_path = path.join(positive_dir, '2', str(pos_file_counter+1)+ext)
im3_path = path.join(positive_dir, '3', str(pos_file_counter+1)+ext)
im1 = cv2.imread(im1_path)
im2 = cv2.imread(im2_path)
im3 = cv2.imread(im3_path)
# cv2.imshow("Image 1", im1)
# cv2.imshow("Image 2", im2)
# cv2.imshow("Image 3", im3)
# cv2.waitKey(0)
X1[total_counter,:,:,:] = cv2.resize(im1, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image
X2[total_counter,:,:,:] = cv2.resize(im2, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image
X3[total_counter,:,:,:] = cv2.resize(im3, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image
y[total_counter] = 1
pos_file_counter += 1
else:
im1_path = path.join(negative_dir, '1', str(neg_file_counter+1)+ext)
im2_path = path.join(negative_dir, '2', str(neg_file_counter+1)+ext)
im3_path = path.join(negative_dir, '3', str(neg_file_counter+1)+ext)
im1 = cv2.imread(im1_path)
im2 = cv2.imread(im2_path)
im3 = cv2.imread(im3_path)
# cv2.imshow("Image 1", im1)
# cv2.imshow("Image 2", im2)
# cv2.imshow("Image 3", im3)
# cv2.waitKey(0)
X1[total_counter,:,:,:] = cv2.resize(im1, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image
X2[total_counter,:,:,:] = cv2.resize(im2, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image
X3[total_counter,:,:,:] = cv2.resize(im3, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image
y[total_counter] = 0
neg_file_counter += 1
total_counter += 1
# normalize inputs from 0-255 to 0.0-1.0
X1 = X1.astype('float32')
X2 = X2.astype('float32')
X3 = X3.astype('float32')
X1 = X1 / 255.0
X2 = X2 / 255.0
X3 = X3 / 255.0
training_samples_limit = math.ceil( split_th * total_counter )
X1_train = X1[0:training_samples_limit,:,:,:]
X2_train = X2[0:training_samples_limit,:,:,:]
X3_train = X3[0:training_samples_limit,:,:,:]
y_train = y[0:training_samples_limit]
X1_test = X1[training_samples_limit:total_counter,:,:,:]
X2_test = X2[training_samples_limit:total_counter,:,:,:]
X3_test = X3[training_samples_limit:total_counter,:,:,:]
y_test = y[training_samples_limit:total_counter]
return [X1_train, X2_train, X3_train, y_train, X1_test, X2_test, X3_test, y_test] | 90abee394944318f567660276e5dd1b2d577225b | 9,355 |
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0]
return hparams | b0a56031e06cff42df4cdeab55e01322be8e439d | 9,356 |
import numpy as np
def leaveOneOut_Input_v4( leaveOut ):
"""
Generate observation matrix and vectors
Y, F
Those observations are trimed for the leave-one-out evaluation. Therefore, the leaveOut
indicates the CA id to be left out, ranging from 1-77
"""
des, X = generate_corina_features('ca')
X = np.delete(X, leaveOut-1, 0)
popul = X[:,0].reshape(X.shape[0],1)
pvt = X[:,2] # poverty index of each CA
# poi_cnt = getFourSquareCount(leaveOut)
# poi_cnt = np.divide(poi_cnt, popul) * 10000
poi_dist = getFourSquarePOIDistribution(leaveOut)
poi_dist = np.divide(poi_dist, popul) * 10000
F_dist = generate_geographical_SpatialLag_ca( leaveOut=leaveOut )
F_flow = generate_transition_SocialLag(year=2010, lehd_type=0, region='ca', leaveOut=leaveOut)
F_taxi = getTaxiFlow(leaveOut = leaveOut)
Y = retrieve_crime_count(year=2010, col=['total'], region='ca')
Y = np.delete(Y, leaveOut-1, 0)
Y = np.divide(Y, popul) * 10000
F = []
n = Y.size
Yd = []
for i in range(n):
for j in range(n):
if i != j:
wij = np.array( [F_dist[i,j],
actualFlowInteraction(pvt[i], pvt[j]) * F_flow[i,j],
F_taxi[i,j] ])
# fij = np.concatenate( (X[i], poi_dist[i], wij * Y[j][0]), 0)
fij = np.concatenate( (X[i], wij * Y[j][0]), 0)
F.append(fij)
Yd.append(Y[i])
F = np.array(F)
    F = np.append(F, np.ones( (F.shape[0], 1) ), axis=1)
Yd = np.array(Yd)
Yd.resize( (Yd.size, 1) )
return Yd, F | 0078bda71345d31cf24f4d1c4ceeafa768357ad4 | 9,357 |
import logging
def common_inroom_auth_response(name, request, operate, op_args):
"""
> 通用的需要通过验证用户存在、已登录、身处 Room 的操作。
参数:
- name: 操作名,用于日志输出;
- request: Flask 传来的 request;
- operate: 具体的操作函数,参数为需要从 request.form 中提取的值,返回值为成功后的response json;
- op_args: operate 函数的 参数名 str 组成的列表。
返回:response json
说明:
这个函数会从 request.form 中提取 from_uid 以及 op_args 中指定的所有值,若没有对应的值,会返回 unexpected;
然后该函数会对用户是否 exist、login、inRoom 进行检测,若有不满足,返回 from_not_exist,from_not_login 或 from_not_in_room;
通过了所有验证后,将调用 operate 函数,并用 argument unpacking 的方法把解析得到的 args 传给 operate。
"""
try:
assert request.method == 'POST', "method should be POST"
assert isinstance(op_args, (tuple, list)), "op_args should be tuple or list"
from_uid = None
args = {}
try:
from_uid = request.form["from_uid"]
for i in op_args:
args[i] = request.form[i]
except KeyError:
raise RequestError("not enough param")
        # validate the requesting user
if not au.byUid.exist(from_uid):
logging.critical('<{name}>: from_not_exist. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
return response_error(get_simple_error_content(ResponseError.from_not_exist))
if not au.byUid.logined(from_uid):
logging.error('<{name}>: from_not_login. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
return response_error(get_simple_error_content(ResponseError.from_not_login))
if not au.byUid.inroom(from_uid):
logging.error('<{name}>: from_not_in_room. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
return response_error(get_simple_error_content(ResponseError.from_not_in_room))
        # all checks passed; perform the operation
return operate(**args)
except Exception as e:
logging.error('<{name}>: unexpected. request = {request}, request.form = {form}'.format(
name=name, request=request, form=request.form))
return response_unexpected(e) | b11607f2d0a6a656c65cf464010f10634389f0bf | 9,358 |
import numpy as np
def get_pca(acts, compute_dirns=False):
    """ Takes in neuron activations and computes their PCA decomposition.
    Returns principal components and associated eigenvalues.
    Args:
        acts: numpy array, shape=(num neurons, num datapoints)
        compute_dirns: bool, if True also return the PCA directions
                       projected back into activation space
    """
assert acts.shape[0] < acts.shape[1], ("input must be number of neurons"
"by datapoints")
# center activations
means = np.mean(acts, axis=1, keepdims=True)
cacts = acts - means
# compute PCA using SVD
U, S, V = np.linalg.svd(cacts, full_matrices=False)
return_dict = {}
return_dict["eigenvals"] = S
return_dict["neuron_coefs"] = U.T
if compute_dirns:
return_dict["pca_dirns"] = np.dot(U.T, cacts) + means
return return_dict | 25620178e340f58b3d13ed0de4ee6d324abcb3ef | 9,359 |
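# Illustrative sketch (not part of the original record): PCA of random
# activations for 10 neurons over 500 datapoints.
acts = np.random.randn(10, 500)
pca_out = get_pca(acts, compute_dirns=True)
print(pca_out["eigenvals"].shape, pca_out["pca_dirns"].shape)  # (10,) (10, 500)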
import logging
logger = logging.getLogger(__name__)  # module-level logger assumed; the original record references `logger` without defining it
def refresh_lease(lease_id, client_id, epoch, ttl):
"""
Update the timeout on the lease if my_id is the lease owner, else fail.
:param lease_id:
:param client_id:
:param ttl: number of seconds in the future to set the expiration to, can lengthen or shorten expiration depending on current value of lease.
:param epoch:
:return: new expiration datetime
"""
if not lease_id:
raise ValueError(lease_id)
if not client_id:
raise ValueError(client_id)
if not epoch:
raise ValueError(epoch)
if not ttl:
raise ValueError(ttl)
retries = REFRESH_RETRIES
logger.debug('Refreshing lease {}'.format(lease_id))
while retries > 0:
try:
with session_scope() as db:
lease = db.query(Lease).with_for_update(of=Lease, nowait=False).get((lease_id))
if not lease:
raise KeyError(lease_id)
if lease.held_by != client_id:
raise Exception('Lock no longer held by this id')
else:
lease.set_holder(lease.held_by, duration_sec=ttl)
return lease.to_json()
except KeyError:
raise
except Exception as e:
if not is_lock_acquisition_error(e):
logger.exception('Failed updating lease duration for {} due to exception'.format(lease_id))
retries -= 1
else:
logger.error('Failed updating lease duration {} after all retries'.format(lease_id))
return None | 6f26ad7887ab26d7c90dfd2c7881f7b50ec5fa1b | 9,360 |
import os
def checkLastJob(jobsFolder):
"""Count number of folders in folder
:param jobsFolder: directory with jobs
:return: number of created jobs
"""
allFolders = os.listdir(jobsFolder)
jobsFolders = [f for f in allFolders if f.startswith('job')]
jobsCount = len(jobsFolders)
return jobsCount | 17ea83ffc07134d91d66a08ee59ed85b499c8e4d | 9,361 |
import cv2
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
#imgCopy = np.uint8(img)
return cv2.Canny(img, low_threshold, high_threshold) | 80e8d4ad99c769887e85577b46f6028ceea0b9f6 | 9,362 |
def pairwise_two_tables(left_table, right_table, allow_no_right=True):
"""
>>> pairwise_two_tables(
... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
... [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2")],
... )
[('L1', 'R1'), ('L2', 'R2'), ('L3', 'R3')]
>>> pairwise_two_tables(
... [("tag1", "L1"), ("tag2", "L2")],
... [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2")],
... )
Traceback (most recent call last):
vrename.NoLeftValueError: ('tag3', 'R3')
>>> pairwise_two_tables(
... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
... [("tag1", "R1"), ("tag3", "R3")],
... False,
... )
Traceback (most recent call last):
vrename.NoRightValueError: ('tag2', 'L2')
>>> pairwise_two_tables(
... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
... [("tag1", "R1"), ("tag3", "R3")],
... )
[('L1', 'R1'), ('L2', None), ('L3', 'R3')]
>>> pairwise_two_tables(
... [("tag1", "L1"), ("tag1", "L1-B")],
... []
... )
Traceback (most recent call last):
vrename.DuplicateTagError: ('tag1', ['L1', 'L1-B'])
>>> pairwise_two_tables(
... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
... [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2"), ("tag1", "R1-B")],
... )
Traceback (most recent call last):
vrename.MultipleRightValueError: ('tag1', 'L1', ['R1', 'R1-B'])
"""
pairs = []
for tag, (left, rights) in _confront_two_tables(left_table, right_table):
if len(rights) > 1:
raise MultipleRightValueError(tag, left, rights)
if not rights:
if allow_no_right:
pairs.append((left, None))
else:
raise NoRightValueError(tag, left)
else:
pairs.append((left, rights[0]))
return pairs | aabcccc2ade9b00ed5bdac32f9cc4a7a4cc718c3 | 9,363 |
import tensorflow as tf
def augment_stochastic_shifts(seq, augment_shifts):
"""Apply a stochastic shift augmentation.
Args:
seq: input sequence of size [batch_size, length, depth]
augment_shifts: list of int offsets to sample from
Returns:
shifted and padded sequence of size [batch_size, length, depth]
"""
shift_index = tf.random.uniform(shape=[], minval=0,
maxval=len(augment_shifts), dtype=tf.int64)
shift_value = tf.gather(tf.constant(augment_shifts), shift_index)
seq = tf.cond(tf.not_equal(shift_value, 0),
lambda: shift_sequence(seq, shift_value),
lambda: seq)
return seq | 1afd682e1f665d4d0786e729e6789a6459b4457c | 9,364 |
def _SourceArgs(parser):
"""Add mutually exclusive source args."""
source_group = parser.add_mutually_exclusive_group()
def AddImageHelp():
"""Returns detailed help for `--image` argument."""
template = """\
An image to apply to the disks being created. When using
this option, the size of the disks must be at least as large as
the image size. Use ``--size'' to adjust the size of the disks.
{alias_table}
This flag is mutually exclusive with ``--source-snapshot''.
"""
indent = template.find(template.lstrip()[0])
return template.format(
alias_table=image_utils.GetImageAliasTable(indent=indent))
image = source_group.add_argument(
'--image',
help='An image to apply to the disks being created.')
image.detailed_help = AddImageHelp
image_utils.AddImageProjectFlag(parser)
source_group.add_argument(
'--image-family',
help=('The family of the image that the boot disk will be initialized '
'with. When a family is used instead of an image, the latest '
'non-deprecated image associated with that family is used.')
)
source_snapshot = source_group.add_argument(
'--source-snapshot',
help='A source snapshot used to create the disks.')
source_snapshot.detailed_help = """\
A source snapshot used to create the disks. It is safe to
delete a snapshot after a disk has been created from the
snapshot. In such cases, the disks will no longer reference
the deleted snapshot. To get a list of snapshots in your
current project, run `gcloud compute snapshots list`. A
snapshot from an existing disk can be created using the
'gcloud compute disks snapshot' command. This flag is mutually
exclusive with ``--image''.
When using this option, the size of the disks must be at least
as large as the snapshot size. Use ``--size'' to adjust the
size of the disks.
""" | dfa44ed54c4efba666f19c850a0eacffe85cafa0 | 9,365 |
def get_all_species_links_on_page(url):
"""Get all the species list on the main page."""
data, dom = get_dom(url)
table = dom.find('.tableguides.table-responsive > table a')
links = []
for link in table:
if link is None or link.text is None:
continue
links.append(dict(
name=link.text.strip().lower(),
url=DAVES_URL_BY_SPECIES + link.get('href')
))
return links | 4a63d78b699150c37ccc9aa30d9fa6dae39d801b | 9,366 |
def gen_image_name(reference: str) -> str:
"""
Generate the image name as a signing input, based on the docker reference.
Args:
reference: Docker reference for the signed content,
e.g. registry.redhat.io/redhat/community-operator-index:v4.9
"""
no_tag = reference.split(":")[0]
image_parts = no_tag.split("/")
return "/".join(image_parts[1:]) | ccaecfe91b5b16a85e3a3c87b83bbc91e54080b1 | 9,367 |
import numpy as np
def adaptive_confidence_interval(values, max_iterations=1000, alpha=0.05, trials=5, variance_threshold=0.5):
""" Compute confidence interval using as few iterations as possible """
try_iterations = 10
while True:
intervals = [confidence_interval(values, try_iterations, alpha) for _ in range(trials)]
band_variance = variance([upper_bound - lower_bound for lower_bound, upper_bound in intervals])
print(try_iterations, band_variance)
if band_variance < variance_threshold or try_iterations > max_iterations:
return intervals[np.random.randint(0, trials)], try_iterations
try_iterations *= 2 | 47c1861384d94a13beaf86eed5ad88a2ad2fb80f | 9,368 |
def get_chat_id(update):
"""
Get chat ID from update.
Args:
update (instance): Incoming update.
Returns:
(int, None): Chat ID.
"""
# Simple messages
if update.message:
return update.message.chat_id
# Menu callbacks
if update.callback_query:
return update.callback_query.message.chat_id
return None | 1669382fd430b445ea9e3a1306c1e68bf2ec0013 | 9,369 |
def action(fun):
"""Method decorator signaling to Deployster Python wrapper that this method is a resource action."""
# TODO: validate function has single 'args' argument (using 'inspect.signature(fun)')
fun.action = True
return fun | 2720d1d44f325f5c8462c9957365b08de7b7847e | 9,370 |
def chooseCommertialCity(commercial_cities):
"""
Parameters
----------
commercial_cities : list[dict]
Returns
-------
commercial_city : dict
"""
print(_('From which city do you want to buy resources?\n'))
for i, city in enumerate(commercial_cities):
print('({:d}) {}'.format(i + 1, city['name']))
selected_city_index = read(min=1, max=len(commercial_cities))
return commercial_cities[selected_city_index - 1] | 6e39c1922a1560f6d3d442cf5d14b764f2c08437 | 9,371 |
import tensorflow as tf
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
"""Reusable code for making a simple neural net layer.
It does a matrix multiply, bias add, and then uses relu to nonlinearize.
It also sets up name scoping so that the resultant graph is easy to read,
and adds a number of summary ops.
"""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights)
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases)
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
tf.summary.histogram('pre_activations', preactivate)
activations = act(preactivate, name="activation")
tf.summary.histogram('activations', activations)
return activations | 38976aa68de06e131f0e2fd8056216ce9bfcba77 | 9,372 |
def move_right_row(row, debug=True):
"""move single row to right."""
if debug:
print(row)
row_del_0 = []
for i in row: # copy non-zero blocks
if i != 0:
row_del_0.append(i)
#print(row_del_0)
row = row_del_0
i = 0
j = len(row_del_0) - 1
while i < j: # combine blocks
#print(i, j)
if row[j] == row[j-1]:
row[j-1] *= 2
del row[j]
j -= 2
else:
j -= 1
#print(i, j)
#print(row_del_0)
for i in range(4 - len(row_del_0)): # insert zeros
row_del_0.insert(0,0)
if debug:
print(row)
return row | b779f2c336a62aaff23f35584460c765a9d7e408 | 9,373 |
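# Illustrative usage (not part of the original record): a 2048-style row is
# compacted to the right; equal neighbours merge once per move.
print(move_right_row([2, 2, 0, 4], debug=False))  # -> [0, 0, 4, 4]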
def get_validate_platform(cmd, platform):
"""Gets and validates the Platform from both flags
:param str platform: The name of Platform passed by user in --platform flag
"""
OS, Architecture = cmd.get_models('OS', 'Architecture', operation_group='runs')
# Defaults
platform_os = OS.linux.value
platform_arch = Architecture.amd64.value
platform_variant = None
if platform:
platform_split = platform.split('/')
platform_os = platform_split[0]
platform_arch = platform_split[1] if len(platform_split) > 1 else Architecture.amd64.value
platform_variant = platform_split[2] if len(platform_split) > 2 else None
platform_os = platform_os.lower()
platform_arch = platform_arch.lower()
valid_os = get_valid_os(cmd)
valid_arch = get_valid_architecture(cmd)
valid_variant = get_valid_variant(cmd)
if platform_os not in valid_os:
raise CLIError(
"'{0}' is not a valid value for OS specified in --platform. "
"Valid options are {1}.".format(platform_os, ','.join(valid_os))
)
if platform_arch not in valid_arch:
raise CLIError(
"'{0}' is not a valid value for Architecture specified in --platform. "
"Valid options are {1}.".format(
platform_arch, ','.join(valid_arch))
)
if platform_variant and (platform_variant not in valid_variant):
raise CLIError(
"'{0}' is not a valid value for Variant specified in --platform. "
"Valid options are {1}.".format(
platform_variant, ','.join(valid_variant))
)
return platform_os, platform_arch, platform_variant | 3b9150c400ed28e322108ba531c7f4c5ac450da1 | 9,374 |
import numpy as np
def get_path_cost(slice, offset, parameters):
"""
part of the aggregation step, finds the minimum costs in a D x M slice (where M = the number of pixels in the
given direction)
:param slice: M x D array from the cost volume.
:param offset: ignore the pixels on the border.
:param parameters: structure containing parameters of the algorithm.
:return: M x D array of the minimum costs for a given slice in a given direction.
"""
other_dim = slice.shape[0]
disparity_dim = slice.shape[1]
disparities = [d for d in range(disparity_dim)] * disparity_dim
disparities = np.array(disparities).reshape(disparity_dim, disparity_dim)
penalties = np.zeros(shape=(disparity_dim, disparity_dim), dtype=slice.dtype)
penalties[np.abs(disparities - disparities.T) == 1] = parameters.P1
penalties[np.abs(disparities - disparities.T) > 1] = parameters.P2
minimum_cost_path = np.zeros(shape=(other_dim, disparity_dim), dtype=slice.dtype)
minimum_cost_path[offset - 1, :] = slice[offset - 1, :]
for i in range(offset, other_dim):
previous_cost = minimum_cost_path[i - 1, :]
current_cost = slice[i, :]
costs = np.repeat(previous_cost, repeats=disparity_dim, axis=0).reshape(disparity_dim, disparity_dim)
costs = np.amin(costs + penalties, axis=0)
minimum_cost_path[i, :] = current_cost + costs - np.amin(previous_cost)
return minimum_cost_path | 06348e483cd7cba012354ecdcadcd0381b0b7dfb | 9,375 |
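# Illustrative sketch: `parameters` only needs to expose the P1/P2 penalties,
# so a small namespace object stands in here (an assumption, not the project's
# real parameter structure).
from types import SimpleNamespace

sgm_params = SimpleNamespace(P1=5, P2=70)
cost_slice = np.random.randint(0, 64, size=(20, 8)).astype(np.uint16)
print(get_path_cost(cost_slice, offset=1, parameters=sgm_params).shape)  # (20, 8)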
def generate_cyclic_group(order, identity_name="e", elem_name="a", name=None, description=None):
"""Generates a cyclic group with the given order.
Parameters
----------
order : int
A positive integer
identity_name : str
The name of the group's identity element
Defaults to 'e'
elem_name : str
Prefix for all non-identity elements
Default is a1, a2, a3, ...
name : str
The group's name. Defaults to 'Zn',
where n is the order.
description : str
A description of the group. Defaults to
'Autogenerated cyclic group of order n',
where n is the group's order.
Returns
-------
Group
A cyclic group of the given order
"""
if name:
nm = name
else:
nm = "Z" + str(order)
if description:
desc = description
else:
desc = f"Autogenerated cyclic group of order {order}"
elements = [identity_name, elem_name] + [f"{elem_name}^" + str(i) for i in range(2, order)]
table = [[((a + b) % order) for b in range(order)] for a in range(order)]
return Group(nm, desc, elements, table) | ed79547dfde64ece136456a8c5d7ce00c4317176 | 9,376 |
import numpy as np
from PIL import Image
from OpenGL.GL import *
def loadTextureBMP(filepath):
"""
Loads the BMP file given in filepath, creates an OpenGL texture from it
and returns the texture ID.
"""
data = np.array(Image.open(filepath))
width = data.shape[0]
height = data.shape[1]
textureID = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, textureID)
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RGB,
width,
height,
0,
GL_BGR,
GL_UNSIGNED_BYTE,
data,
)
# default parameters for now. Can be parameterized in the future
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
glGenerateMipmap(GL_TEXTURE_2D)
return textureID | dd80584afc644fa23c2aef919a24152ea5b3696e | 9,377 |
import numpy as np
def get_pixeldata(ds: "Dataset") -> "np.ndarray":
"""Return a :class:`numpy.ndarray` of the pixel data.
.. versionadded:: 2.1
Parameters
----------
ds : pydicom.dataset.Dataset
The :class:`Dataset` containing an :dcm:`Image Pixel
<part03/sect_C.7.6.3.html>` module and the *Pixel Data* to be
converted.
Returns
-------
numpy.ndarray
The contents of (7FE0,0010) *Pixel Data* as a 1D array.
"""
expected_len = get_expected_length(ds, 'pixels')
frame_len = expected_len // getattr(ds, "NumberOfFrames", 1)
# Empty destination array for our decoded pixel data
arr = np.empty(expected_len, pixel_dtype(ds))
generate_offsets = range(0, expected_len, frame_len)
for frame, offset in zip(generate_frames(ds, False), generate_offsets):
arr[offset:offset + frame_len] = frame
return arr | 418603d30bf272affc0e63615e94d4cce11b1bf2 | 9,378 |
import time
def timeit(method):
""" Timing Decorator Function Written by Fahim Sakri of PythonHive (https://medium.com/pthonhive) """
def timed(*args, **kwargs):
        time_start = time.time()
        result = method(*args, **kwargs)
        time_end = time.time()
if 'log_time' in kwargs:
name = kwargs.get('log_name', method.__name__.upper())
kwargs['log_time'][name] = int((time_end - time_start) * 1000)
else:
print('\n{} {:5f} ms'.format(method.__name__, (time_end - time_start) * 1000))
return result
return timed | 598667950bc707b72239af9f4e5a3248dbe64d96 | 9,379 |
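# Illustrative usage (not part of the original record): wrap a function and
# time a single call.
@timeit
def busy_sum(n):
    return sum(range(n))

busy_sum(10 ** 6)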
import pandas as pd
def allot_projects():
"""
The primary function that allots the projects to the employees.
It generates a maximum match for a bipartite graph of employees and projects.
:return: A tuple having the allotments, count of employees allotted and
total project headcount (a project where two people need to work
will have a headcount ot two).
"""
allotments = []
try:
emp_data = pd.read_pickle(EMPLOYEE_PICKLE_FILE)
project_data = pd.read_pickle(PROJECT_PICKLE_FILE)
except IOError as e:
print("Either employee or project data is not present. No allocation done.")
return [], 0, 0
employees = []
for _, emp_row in emp_data.iterrows():
transposed = emp_row.T
transposed = transposed[transposed == 1]
skills = set(transposed.index)
employees.append(
{
'name': emp_row['name'],
'value': skills
}
)
projects = []
for _, project_row in project_data.iterrows():
n = int(project_row['emp_count'])
for i in range(n):
projects.append(
{
'absolute_name': project_row['name'],
'name': project_row['name'] + str(i),
'value': set(project_row[['domain', 'language', 'type']].values)
}
)
matrix = []
for e in employees:
row = []
for p in projects:
if len(e['value'].intersection(p['value'])) >= 2:
row.append(1)
else:
row.append(0)
matrix.append(row)
employee_count = len(employees)
project_count = len(projects)
# An array to keep track of the employees assigned to projects.
# The value of emp_project_match[i] is the employee number
# assigned to project i.
# If value = -1 indicates nobody is allocated that project.
emp_project_match = [-1] * project_count
def bipartite_matching(employee, match, seen):
"""
A recursive solution that returns true if a project mapping
for employee is possible.
:param employee: The employee for whom we are searching a project.
:param match: Stores the assigned employees to projects.
:param seen: An array to tell the projects available to employee.
:return: `True` if match for employee is possible else `False`.
"""
# Try every project one by one.
for project in range(project_count):
# If employee is fit for the project and the project has not yet been
# checked by the employee.
if matrix[employee][project] and seen[project] is False:
# Mark the project as checked by employee.
seen[project] = True
# If project is not assigned to anyone or previously assigned to someone else
# (match[project]) but that employee could find an alternate project.
# Note that since the project has been seen by the employee above, it will
# not be available to match[project].
if match[project] == -1 or bipartite_matching(match[project], match, seen):
match[project] = employee
return True
return False
emp_allotted = 0
for emp in range(employee_count):
# Mark all projects as not seen for next applicant.
projects_seen = [False] * project_count
# Find if the employee can be assigned a project
if bipartite_matching(emp, emp_project_match, projects_seen):
emp_allotted += 1
for p, e in enumerate(emp_project_match):
if e != -1:
allotments.append((employees[e]['name'], projects[p]['absolute_name']))
return allotments, emp_allotted, project_count | 774df8714cd47eb2a7affe34480dfec682010341 | 9,380 |
import requests
def upload_record(data, headers, rdr_project_id):
""" Upload a supplied record to the research data repository
"""
request_url = f"https://api.figsh.com/v2/account/projects/{rdr_project_id}/articles"
response = requests.post(request_url, headers=headers, json=data)
return response.json() | 7431234757668f9157f90aa8a9c335ee0e2a043b | 9,381 |
def datetime_to_ts(str_datetime):
"""
Transform datetime representation to unix epoch.
:return:
"""
if '1969-12-31' in str_datetime:
# ignore default values
return None
else:
# convert to timestamp
if '.' in str_datetime: # check whether it has milliseconds or not
dt = tutil.strff_to_date(str_datetime)
else:
dt = tutil.strf_to_date(str_datetime)
ts = tutil.date_to_ts(dt)
return ts | 83b40abc6c5ce027cf04cd2335b2f35e235451d0 | 9,382 |
import functools
def is_codenames_player(funct):
"""
Decorator that ensures the method is called only by a codenames player.
Args:
funct (function): Function being decorated
Returns:
function: Decorated function which calls the original function
if the user is a codenames player, and returns otherwise
"""
@functools.wraps(funct)
def wrapper(*args, **kwargs):
if not current_user.is_authenticated or current_user.codenames_player is None:
return None
return funct(*args, **kwargs)
return wrapper | 814bc929bbd20e8c527bd5c922a25823a4bdbefc | 9,383 |
def same_container_2():
"""
Another reason to use `same_container=co.SameContainer.NEW` to force
container sharing is when you want your commands to share a filesystem.
This makes a download and analyze pipeline very easy, for example, because
you simply download the data to the filesystem in one node, and the analyze
node can automatically see it. There is no need to put the data in a
separate data store.
However, there is a downside to this `same_container` mode. When sharing a
container, Exec nodes will _always run in serial_, even if the parent is a
Parallel node. So, you lose the ability to parallelize. Also, when the
SameContainer nodes finish, the container exits and that local filesystem is
lost. To restore the container state you need to rerun all the nodes, making
debugging or error resetting a little more awkward.
"""
dockerfile = "./docker/Dockerfile.curl"
image = co.Image(dockerfile=dockerfile, context=".")
with co.Parallel(image=image, doc=co.util.magic_doc()) as same_container_example:
with co.Serial(name="shared_filesystem", same_container=co.SameContainer.NEW):
data_url = "http://api.eia.gov/bulk/STEO.zip"
co.Exec(f"curl {data_url} > /tmp/data.zip", name="download")
co.Exec("unzip -pq /tmp/data.zip > /tmp/data", name="unzip")
co.Exec("wc -l /tmp/data", name="analyze")
with co.Parallel(name="always_serial", same_container=co.SameContainer.NEW):
co.Exec("echo I cannot run in parallel", name="parallel_exec_1")
co.Exec("echo even if I want to", name="parallel_exec_2")
return same_container_example | 06e1a3c70c33ed9b7de46ebabb1d5f2f6bc83266 | 9,384 |
def get(args) -> str:
"""Creates manifest in XML format.
@param args: Arguments provided by the user from command line
@return: Generated xml manifest string
"""
arguments = {
'target': args.target,
'targetType': None if args.nohddl else args.targettype,
'path': args.path,
'nohddl': args.nohddl
}
manifest = ('<?xml version="1.0" encoding="utf-8"?>' +
'<manifest>' +
'<type>config</type>' +
'<config>' +
'<cmd>get_element</cmd>' +
'{0}' +
'<configtype>' +
'{1}' +
'<get>' +
'{2}' +
'</get>' +
'</configtype>' +
'</config>' +
'</manifest>').format(
create_xml_tag(arguments, "targetType"),
create_xml_tag(arguments, "target"),
create_xml_tag(arguments, "path")
)
print("manifest {0}".format(manifest))
return manifest | 7b859952d7eda9d6dedd916bb3534d225c3d9593 | 9,385 |
from typing import Callable
def elementwise(op: Callable[..., float], *ds: D) -> NumDict:
"""
Apply op elementwise to a sequence of numdicts.
If any numdict in ds has None default, then default is None, otherwise the
new default is calculated by running op on all defaults.
"""
keys: set = set()
keys.update(*ds)
grouped: dict = {}
defaults: list = []
for d in ds:
defaults.append(d.default)
for k in keys:
grouped.setdefault(k, []).append(d[k])
if any([d is None for d in defaults]):
default = None
else:
default = op(defaults)
return NumDict({k: op(grouped[k]) for k in grouped}, default) | 4e7dce60d01e8bcec722a5a6d60d15920a6a91c5 | 9,386 |
import torch
import torch.nn.functional as F
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = -1,
gamma: float = 2,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha: (optional) Weighting factor in range (0,1) to balance
positive vs negative examples. Default = -1 (no weighting).
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
Returns:
Loss tensor with the reduction option applied.
"""
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
if reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss | e792c1bea37bcc26ff323a764fc56e0f4bbd0bc5 | 9,387 |
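# Illustrative sketch (not part of the original record): focal loss on random
# logits for a batch of 4 samples with 3 independent binary targets each.
logits = torch.randn(4, 3)
targets = torch.randint(0, 2, (4, 3)).float()
print(sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2, reduction="mean"))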
from collections import defaultdict
import numpy as np
def arcsin(x):
"""Return the inverse sine or the arcsin.
INPUTS
x (Variable object or real number)
RETURNS
if x is a Variable, then return a Variable with val and der.
if x is a real number, then return the value of arcsin(x).
EXAMPLES
>>> x = Variable(0, name='x')
>>> t = arcsin(x)
>>> print(t.val, t.der['x'])
0.0 1.0
"""
try:
val = np.arcsin(x.val)
ders = defaultdict(float)
sec_ders = defaultdict(float)
for key in x.der:
ders[key] += 1/((1 - x.val**2)**0.5) * (x.der[key])
sec_ders[key] += (x.val*x.der[key]**2-x.sec_der[key]*(x.val**2-1))/((1-x.val**2)**1.5)
return Variable(val, ders, sec_ders)
except AttributeError:
return np.arcsin(x) | a5d899dae9b4fc33b6ddf2e2786ec6eee8508541 | 9,388 |
from copy import deepcopy
from itertools import chain
from tqdm import tqdm
def preprocessing(texts, words, label, coef=0.3, all_tasks=False, include_repeat=True, progressbar=True):
"""
the function returns the processed array for the Spacy standard
"""
train = []
enit = {}
assert 0 < coef <= 1, f"The argument must be in the range (0 < coef <= 1) --> {coef}"
if all_tasks:
words_f = unique(flatten(words, coef))
if coef == 1:
include_repeat = False
else:
assert len(texts) == len(words), f"Data must be same length: ({len(texts)}, {len(words)})"
print("\n\033[31mcoef is ignored because you are using all_tasks=False")
for i in tqdm(range((len(texts))), disable=not progressbar):
if all_tasks:
if include_repeat:
words_f = unique(chain(words_f, words[i]))
enit['entities'] = to_format(texts[i], words_f, label)
else:
enit['entities'] = to_format(texts[i], words[i], label)
train.append((texts[i], deepcopy(enit)))
return train | f10c27f8ed686d45a1c778bdf557f88ad3f3bdfa | 9,389 |
import numpy
import math
import cupy
def rotate(
input,
angle,
axes=(1, 0),
reshape=True,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
*,
allow_float32=True,
):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
``axes`` parameter using spline interpolation of the requested order.
Args:
input (cupy.ndarray): The input array.
angle (float): The rotation angle in degrees.
axes (tuple of 2 ints): The two axes that define the plane of rotation.
Default is the first two axes.
reshape (bool): If ``reshape`` is True, the output shape is adapted so
that the input array is contained completely in the output. Default
is True.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. The order has to be in the range 0-5.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The rotated input.
Notes
-----
This implementation handles boundary modes 'wrap' and 'reflect' correctly,
while SciPy prior to release 1.6.0 does not. So, if comparing to older
SciPy, some disagreement near the borders may occur.
For ``order > 1`` with ``prefilter == True``, the spline prefilter boundary
conditions are implemented correctly only for modes 'mirror', 'reflect'
and 'grid-wrap'.
.. seealso:: :func:`scipy.ndimage.zoom`
"""
_check_parameter("rotate", order, mode)
if mode == "opencv":
mode = "_opencv_edge"
input_arr = input
axes = list(axes)
if axes[0] < 0:
axes[0] += input_arr.ndim
if axes[1] < 0:
axes[1] += input_arr.ndim
if axes[0] > axes[1]:
axes = [axes[1], axes[0]]
if axes[0] < 0 or input_arr.ndim <= axes[1]:
raise ValueError("invalid rotation plane specified")
ndim = input_arr.ndim
rad = numpy.deg2rad(angle)
sin = math.sin(rad)
cos = math.cos(rad)
# determine offsets and output shape as in scipy.ndimage.rotate
rot_matrix = numpy.array([[cos, sin], [-sin, cos]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
matrix = numpy.identity(ndim)
matrix[axes[0], axes[0]] = cos
matrix[axes[0], axes[1]] = sin
matrix[axes[1], axes[0]] = -sin
matrix[axes[1], axes[1]] = cos
offset = numpy.zeros(ndim, dtype=float)
offset[axes] = in_center - out_center
matrix = cupy.asarray(matrix)
offset = cupy.asarray(offset)
return affine_transform(
input,
matrix,
offset,
output_shape,
output,
order,
mode,
cval,
prefilter,
allow_float32=allow_float32,
) | 04b7f3dc66d09c0b69ba97579972e131cc96b375 | 9,390 |
def generate_url_fragment(title, blog_post_id):
"""Generates the url fragment for a blog post from the title of the blog
post.
Args:
title: str. The title of the blog post.
blog_post_id: str. The unique blog post ID.
Returns:
str. The url fragment of the blog post.
"""
lower_title = title.lower()
hyphenated_title = lower_title.replace(' ', '-')
lower_id = blog_post_id.lower()
return hyphenated_title + '-' + lower_id | c846e6203fa4782c6dc92c892b9e0b6c7a0077b5 | 9,391 |
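# Illustrative usage (not part of the original record): spaces in the title are
# hyphenated, everything is lower-cased, and the blog post id is appended.
print(generate_url_fragment("My First Post", "AbC123"))  # -> my-first-post-abc123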
def update_cluster(cluster, cluster_args, args,
api=None, path=None, session_file=None):
"""Updates cluster properties
"""
if api is None:
api = bigml.api.BigML()
message = dated("Updating cluster. %s\n" %
get_url(cluster))
log_message(message, log_file=session_file,
console=args.verbosity)
cluster = api.update_cluster(cluster, cluster_args)
check_resource_error(cluster, "Failed to update cluster: %s"
% cluster['resource'])
cluster = check_resource(cluster, api.get_cluster, query_string=FIELDS_QS)
if is_shared(cluster):
message = dated("Shared cluster link. %s\n" %
get_url(cluster, shared=True))
log_message(message, log_file=session_file, console=args.verbosity)
if args.reports:
report(args.reports, path, cluster)
return cluster | d07e3969e90cbc84f5329845e540c3b1a03d86b5 | 9,392 |
def get_post_by_user(user_id: int, database: Session) -> Post:
"""
"""
post = database.query(Post).filter(
Post.user == user_id).order_by(Post.id.desc()).all()
logger.info("FOI RETORNADO DO BANCO AS SEGUINTES CONTRIBUIÇÕES: %s", post)
return post | 9274caf4d484e68bdc7c852aff6360d9674b2957 | 9,393 |
import yaml
def unformat_bundle(formattedBundle):
"""
Converts a push-ready bundle into a structured object by changing
stringified yaml of 'customResourceDefinitions', 'clusterServiceVersions',
and 'packages' into lists of objects.
Undoing the format helps simplify bundle validation.
:param formattedBundle: A push-ready bundle
"""
bundle = BuildCmd()._get_empty_bundle()
if 'data' not in formattedBundle:
return bundle
if 'customResourceDefinitions' in formattedBundle['data']:
customResourceDefinitions = yaml.safe_load(
formattedBundle['data']['customResourceDefinitions'])
if customResourceDefinitions:
bundle['data']['customResourceDefinitions'] = customResourceDefinitions
if 'clusterServiceVersions' in formattedBundle['data']:
clusterServiceVersions = yaml.safe_load(
formattedBundle['data']['clusterServiceVersions'])
if clusterServiceVersions:
bundle['data']['clusterServiceVersions'] = clusterServiceVersions
if 'packages' in formattedBundle['data']:
packages = yaml.safe_load(formattedBundle['data']['packages'])
if packages:
bundle['data']['packages'] = packages
return bundle | fcc6067fab89dffa8e31e47da42060ca11a48478 | 9,394 |
def supports_box_chars() -> bool:
"""Check if the encoding supports Unicode box characters."""
return all(map(can_encode, "│─└┘┌┐")) | 82a3f57429d99dc2b16055d2b7103656ec2e05e5 | 9,395 |
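# Illustrative sketch: `can_encode` is an external helper not shown in this
# record; a minimal stand-in based on the stdout encoding is assumed here so
# the example runs on its own.
import sys

def can_encode(char: str) -> bool:
    try:
        char.encode(sys.stdout.encoding or "ascii")
        return True
    except UnicodeEncodeError:
        return False

print(supports_box_chars())  # True on a UTF-8 terminal, False on plain ASCII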
import numpy as np
def calculate_intersection_over_union(box_data, prior_boxes):
"""Calculate intersection over union of box_data with respect to
prior_boxes.
Arguments:
ground_truth_data: numpy array with shape (4) indicating x_min, y_min,
x_max and y_max coordinates of the bounding box.
prior_boxes: numpy array with shape (num_boxes, 4).
Returns:
intersections_over_unions: numpy array with shape (num_boxes) which
corresponds to the intersection over unions of box_data with respect
to all prior_boxes.
"""
x_min = box_data[0]
y_min = box_data[1]
x_max = box_data[2]
y_max = box_data[3]
prior_boxes_x_min = prior_boxes[:, 0]
prior_boxes_y_min = prior_boxes[:, 1]
prior_boxes_x_max = prior_boxes[:, 2]
prior_boxes_y_max = prior_boxes[:, 3]
# calculating the intersection
intersections_x_min = np.maximum(prior_boxes_x_min, x_min)
intersections_y_min = np.maximum(prior_boxes_y_min, y_min)
intersections_x_max = np.minimum(prior_boxes_x_max, x_max)
intersections_y_max = np.minimum(prior_boxes_y_max, y_max)
intersected_widths = intersections_x_max - intersections_x_min
intersected_heights = intersections_y_max - intersections_y_min
intersected_widths = np.maximum(intersected_widths, 0)
intersected_heights = np.maximum(intersected_heights, 0)
intersections = intersected_widths * intersected_heights
# calculating the union
prior_box_widths = prior_boxes_x_max - prior_boxes_x_min
prior_box_heights = prior_boxes_y_max - prior_boxes_y_min
prior_box_areas = prior_box_widths * prior_box_heights
box_width = x_max - x_min
box_height = y_max - y_min
ground_truth_area = box_width * box_height
unions = prior_box_areas + ground_truth_area - intersections
intersection_over_union = intersections / unions
return intersection_over_union | 6ac634953a92f1b81096f72209ae5d25d46aa4e6 | 9,396 |
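# Illustrative sketch (not part of the original record): IoU of one box against
# two prior boxes; the first prior is identical, the second overlaps by 1/7.
box = np.array([0.0, 0.0, 2.0, 2.0])
priors = np.array([[0.0, 0.0, 2.0, 2.0], [1.0, 1.0, 3.0, 3.0]])
print(calculate_intersection_over_union(box, priors))  # -> [1. 0.14285714]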
def get_report(analytics, start_date, end_date = 'today'):
"""Queries the Analytics Reporting API V4.
Args:
analytics: An authorized Analytics Reporting API V4 service object.
Returns: The Analytics Reporting API V4 response.
"""
return analytics.reports().batchGet(
body={
'reportRequests': [
{
'viewId': VIEW_ID,
'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
'metrics': [{'expression': 'ga:userTimingValue'}],
'dimensions': [ {'name': 'ga:userTimingVariable'}]
}]
}
).execute() | cac0b27a40f6a648a4d3f41aa9615dc114700f84 | 9,397 |
from xml.dom import minidom
from dicttoxml import dicttoxml
def write_pinout_xml(pinout, out_xml=None):
"""
write the pinout dict to xml format with no attributes. this is verbose
but is the preferred xml format
"""
ar = []
for k in sort_alpha_num(pinout.keys()):
d = pinout[k]
d['number'] = k
# ar.append({'pin': d})
ar.append( d)
# x = dicttoxml(pinout, custom_root='pin_map', attr_type=True)
my_item_func = lambda x: 'pin'
# x = dicttoxml(ar, custom_root='pin_map', attr_type=False)
x = dicttoxml(ar, custom_root='pin_map', item_func=my_item_func, attr_type=False)
reparsed = minidom.parseString(x)
xml_pretty = reparsed.toprettyxml(indent=" ")
if out_xml != None:
fo = open(out_xml, "w")
fo.write(xml_pretty)
fo.close()
return xml_pretty | 7f2fff341b11eb29bf672a4f78b0fc0971a26cbc | 9,398 |
import json
def get_solution(request, level=1):
"""Returns a render of answers.html"""
context = RequestContext(request)
cheat_message = '\\text{Ulovlig tegn har blitt brukt i svar}'
required_message = '\\text{Svaret ditt har ikke utfylt alle krav}'
render_to = 'game/answer.html'
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
form_values = form.process()
template = Template.objects.get(pk=form_values['primary_key'])
user_answer = form_values['user_answer']
try:
disallowed = json.loads(template.disallowed)
except ValueError:
disallowed = []
try:
required = json.loads(template.required)
except ValueError:
required = []
context_dict = make_answer_context_dict(form_values)
if (cheat_check(user_answer, disallowed, form_values['variable_dictionary'].split('§'))) and\
(form_values['template_type'] == 'normal') and (context_dict['user_won']):
context_dict['answer'] = cheat_message
return render_to_response(render_to, context_dict, context)
elif (required_check(user_answer, required, form_values['variable_dictionary'].split('§'))) and \
(form_values['template_type'] == 'normal') and (context_dict['user_won']):
context_dict['answer'] = required_message
return render_to_response(render_to, context_dict, context)
if request.is_ajax():
new_user_rating, new_star = change_level_rating(template, request.user, context_dict['user_won'],
form_values['template_type'], level)
context_dict['chapter_id'] = request.POST['chapter_id']
context_dict['ulp'] = int(new_user_rating)
context_dict['new_star'] = new_star
context_dict['stars'] = get_user_stars_for_level(request.user, Level.objects.get(pk=level))
return render_to_response(render_to, context_dict, context)
else:
change_elo(template, request.user, context_dict['user_won'], form_values['template_type'])
render_to_response(render_to, context_dict, context)
else:
print(form.errors) | f6d5b7c90b656d2302c1aaf2935fc39bcf882a03 | 9,399 |