code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) | text (stringlengths 164 to 112k)
---|---|---|
def acceptmergerequest(self, project_id, mergerequest_id, merge_commit_message=None):
"""
Update an existing merge request.
:param project_id: ID of the project originating the merge request
:param mergerequest_id: ID of the merge request to accept
:param merge_commit_message: Custom merge commit message
:return: dict of the modified merge request
"""
data = {'merge_commit_message': merge_commit_message}
request = requests.put(
'{0}/{1}/merge_request/{2}/merge'.format(self.projects_url, project_id, mergerequest_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False | Update an existing merge request.
:param project_id: ID of the project originating the merge request
:param mergerequest_id: ID of the merge request to accept
:param merge_commit_message: Custom merge commit message
:return: dict of the modified merge request | Below is the instruction that describes the task:
### Input:
Update an existing merge request.
:param project_id: ID of the project originating the merge request
:param mergerequest_id: ID of the merge request to accept
:param merge_commit_message: Custom merge commit message
:return: dict of the modified merge request
### Response:
def acceptmergerequest(self, project_id, mergerequest_id, merge_commit_message=None):
"""
Update an existing merge request.
:param project_id: ID of the project originating the merge request
:param mergerequest_id: ID of the merge request to accept
:param merge_commit_message: Custom merge commit message
:return: dict of the modified merge request
"""
data = {'merge_commit_message': merge_commit_message}
request = requests.put(
'{0}/{1}/merge_request/{2}/merge'.format(self.projects_url, project_id, mergerequest_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False |
def username_matches_request_user(view_fn):
"""Checks if the username matches the request user, and if so replaces
username with the actual user object.
Returns 404 if the username does not exist, and 403 if it doesn't match.
"""
@wraps(view_fn)
def wrapper(request, username, *args, **kwargs):
User = get_user_model()
user = get_object_or_404(User, username=username)
if user != request.user:
return HttpResponseForbidden()
else:
return view_fn(request, user, *args, **kwargs)
return wrapper | Checks if the username matches the request user, and if so replaces
username with the actual user object.
Returns 404 if the username does not exist, and 403 if it doesn't match. | Below is the instruction that describes the task:
### Input:
Checks if the username matches the request user, and if so replaces
username with the actual user object.
Returns 404 if the username does not exist, and 403 if it doesn't match.
### Response:
def username_matches_request_user(view_fn):
"""Checks if the username matches the request user, and if so replaces
username with the actual user object.
Returns 404 if the username does not exist, and 403 if it doesn't match.
"""
@wraps(view_fn)
def wrapper(request, username, *args, **kwargs):
User = get_user_model()
user = get_object_or_404(User, username=username)
if user != request.user:
return HttpResponseForbidden()
else:
return view_fn(request, user, *args, **kwargs)
return wrapper |
def app_view(name, **kwargs):
"""
Show manifest content for an application.
If application is not uploaded, an error will be displayed.
"""
ctx = Context(**kwargs)
ctx.execute_action('app:view', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
}) | Show manifest content for an application.
If application is not uploaded, an error will be displayed. | Below is the instruction that describes the task:
### Input:
Show manifest content for an application.
If application is not uploaded, an error will be displayed.
### Response:
def app_view(name, **kwargs):
"""
Show manifest content for an application.
If application is not uploaded, an error will be displayed.
"""
ctx = Context(**kwargs)
ctx.execute_action('app:view', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
}) |
def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) | See `volume`. | Below is the instruction that describes the task:
### Input:
See `volume`.
### Response:
def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) |
def validate(cls, keystr):
""" raises cls.Bad if keys has errors """
if "#{" in keystr:
# it's a template with keys vars
keys = cls.from_template(keystr)
for k in keys:
cls.validate_one(cls.extract(k))
else:
# plain keys str
cls.validate_one(keystr) | raises cls.Bad if keys has errors | Below is the instruction that describes the task:
### Input:
raises cls.Bad if keys has errors
### Response:
def validate(cls, keystr):
""" raises cls.Bad if keys has errors """
if "#{" in keystr:
# it's a template with keys vars
keys = cls.from_template(keystr)
for k in keys:
cls.validate_one(cls.extract(k))
else:
# plain keys str
cls.validate_one(keystr) |
def visit_unload_from_select(element, compiler, **kw):
"""Returns the actual sql query for the UnloadFromSelect class."""
template = """
UNLOAD (:select) TO :unload_location
CREDENTIALS :credentials
{manifest}
{header}
{delimiter}
{encrypted}
{fixed_width}
{gzip}
{add_quotes}
{null}
{escape}
{allow_overwrite}
{parallel}
{region}
{max_file_size}
"""
el = element
qs = template.format(
manifest='MANIFEST' if el.manifest else '',
header='HEADER' if el.header else '',
delimiter=(
'DELIMITER AS :delimiter' if el.delimiter is not None else ''
),
encrypted='ENCRYPTED' if el.encrypted else '',
fixed_width='FIXEDWIDTH AS :fixed_width' if el.fixed_width else '',
gzip='GZIP' if el.gzip else '',
add_quotes='ADDQUOTES' if el.add_quotes else '',
escape='ESCAPE' if el.escape else '',
null='NULL AS :null_as' if el.null is not None else '',
allow_overwrite='ALLOWOVERWRITE' if el.allow_overwrite else '',
parallel='PARALLEL OFF' if not el.parallel else '',
region='REGION :region' if el.region is not None else '',
max_file_size=(
'MAXFILESIZE :max_file_size MB'
if el.max_file_size is not None else ''
),
)
query = sa.text(qs)
if el.delimiter is not None:
query = query.bindparams(sa.bindparam(
'delimiter', value=element.delimiter, type_=sa.String,
))
if el.fixed_width:
query = query.bindparams(sa.bindparam(
'fixed_width',
value=_process_fixed_width(el.fixed_width),
type_=sa.String,
))
if el.null is not None:
query = query.bindparams(sa.bindparam(
'null_as', value=el.null, type_=sa.String
))
if el.region is not None:
query = query.bindparams(sa.bindparam(
'region', value=el.region, type_=sa.String
))
if el.max_file_size is not None:
max_file_size_mib = float(el.max_file_size) / 1024 / 1024
query = query.bindparams(sa.bindparam(
'max_file_size', value=max_file_size_mib, type_=sa.Float
))
return compiler.process(
query.bindparams(
sa.bindparam('credentials', value=el.credentials, type_=sa.String),
sa.bindparam(
'unload_location', value=el.unload_location, type_=sa.String,
),
sa.bindparam(
'select',
value=compiler.process(
el.select,
literal_binds=True,
),
type_=sa.String,
),
),
**kw
) | Returns the actual sql query for the UnloadFromSelect class. | Below is the instruction that describes the task:
### Input:
Returns the actual sql query for the UnloadFromSelect class.
### Response:
def visit_unload_from_select(element, compiler, **kw):
"""Returns the actual sql query for the UnloadFromSelect class."""
template = """
UNLOAD (:select) TO :unload_location
CREDENTIALS :credentials
{manifest}
{header}
{delimiter}
{encrypted}
{fixed_width}
{gzip}
{add_quotes}
{null}
{escape}
{allow_overwrite}
{parallel}
{region}
{max_file_size}
"""
el = element
qs = template.format(
manifest='MANIFEST' if el.manifest else '',
header='HEADER' if el.header else '',
delimiter=(
'DELIMITER AS :delimiter' if el.delimiter is not None else ''
),
encrypted='ENCRYPTED' if el.encrypted else '',
fixed_width='FIXEDWIDTH AS :fixed_width' if el.fixed_width else '',
gzip='GZIP' if el.gzip else '',
add_quotes='ADDQUOTES' if el.add_quotes else '',
escape='ESCAPE' if el.escape else '',
null='NULL AS :null_as' if el.null is not None else '',
allow_overwrite='ALLOWOVERWRITE' if el.allow_overwrite else '',
parallel='PARALLEL OFF' if not el.parallel else '',
region='REGION :region' if el.region is not None else '',
max_file_size=(
'MAXFILESIZE :max_file_size MB'
if el.max_file_size is not None else ''
),
)
query = sa.text(qs)
if el.delimiter is not None:
query = query.bindparams(sa.bindparam(
'delimiter', value=element.delimiter, type_=sa.String,
))
if el.fixed_width:
query = query.bindparams(sa.bindparam(
'fixed_width',
value=_process_fixed_width(el.fixed_width),
type_=sa.String,
))
if el.null is not None:
query = query.bindparams(sa.bindparam(
'null_as', value=el.null, type_=sa.String
))
if el.region is not None:
query = query.bindparams(sa.bindparam(
'region', value=el.region, type_=sa.String
))
if el.max_file_size is not None:
max_file_size_mib = float(el.max_file_size) / 1024 / 1024
query = query.bindparams(sa.bindparam(
'max_file_size', value=max_file_size_mib, type_=sa.Float
))
return compiler.process(
query.bindparams(
sa.bindparam('credentials', value=el.credentials, type_=sa.String),
sa.bindparam(
'unload_location', value=el.unload_location, type_=sa.String,
),
sa.bindparam(
'select',
value=compiler.process(
el.select,
literal_binds=True,
),
type_=sa.String,
),
),
**kw
) |
def has_document(self, doc_url):
""" Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
return self.__exists_row_not_too_old(row) | Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise | Below is the instruction that describes the task:
### Input:
Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise
### Response:
def has_document(self, doc_url):
""" Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
return self.__exists_row_not_too_old(row) |
def rm(i):
"""
Input: {
(repo_uoa) - repo UOA ; can be wild cards
module_uoa - module UOA ; can be wild cards
data_uoa - data UOA ; can be wild cards
(force) - if 'yes', force deleting without questions
or
(f) - to be compatible with rm -f
(share) - if 'yes', try to remove via GIT
(tags) - use these tags in format tags=x,y,z to prune rm
or
(search_string) - prune entries with expression *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
a=i.get('repo_uoa','')
# Check if global writing is allowed
r=check_writing({'repo_uoa':a, 'delete':'yes'})
if r['return']>0: return r
o=i.get('out','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if duoa=='':
return {'return':1, 'error':'data UOA is not defined'}
lst=[]
tags=i.get('tags','')
ss=i.get('search_string','')
# Check wildcards
if a.find('*')>=0 or a.find('?')>=0 or m.find('*')>=0 or m.find('?')>=0 or duoa.find('*')>=0 or duoa.find('?')>=0:
if tags=='' and ss=='':
r=list_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
else:
r=search({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa, 'tags':tags, 'search_string':ss})
if r['return']>0: return r
lst=r['lst']
else:
# Find path to data
r=find_path_to_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
ruoa=r.get('repo_uoa','')
ruid=r.get('repo_uid','')
muoa=r.get('module_uoa','')
muid=r.get('module_uid','')
duid=r.get('data_uid','')
duoa=r.get('data_alias','')
if duoa=='': duoa=duid
lst.append({'path':p, 'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid': duid})
force=i.get('force','')
if force=='':
force=i.get('f','')
first=True
for ll in lst:
p=ll['path']
pm=os.path.split(p)[0]
muid=ll['module_uid']
muoa=ll['module_uoa']
duid=ll['data_uid']
duoa=ll['data_uoa']
if duoa!=duid: dalias=duoa
else: dalias=''
# Get user-friendly CID
x=muoa+':'+duoa
if o=='con':
# Try to check if has data name (useful for env)
p2=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_info'])
if os.path.isfile(p2):
r2=load_json_file({'json_file':p2})
if r2['return']==0:
x2=r2['dict'].get('data_name','')
if x2!='':
x='"'+x2+'"\n '+x
xcuoa=x+' ('+muid+':'+duid+')'
# Check repo/module writing
ii={'module_uoa':m, 'repo_uoa':ll['repo_uoa'], 'repo_uid':ll['repo_uid']}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
shr=i.get('share','')
if shr=='yes':
rshared='git'
rsync='yes'
# If interactive
to_delete=True
if o=='con' and force!='yes':
r=inp({'text':'Are you sure to delete CK entry '+xcuoa+' ? (y/N): '})
c=r['string'].lower()
if c!='y' and c!='yes': to_delete=False
# If deleting
if to_delete:
# First remove alias if exists
if dalias!='':
# Delete alias
r=delete_alias({'path':pm, 'data_alias':dalias, 'data_uid':duid, 'repo_dict':rd, 'share':shr})
if r['return']>0: return r
if rshared!='':
pp=os.path.split(p)
pp0=pp[0]
pp1=pp[1]
ppp=os.getcwd()
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', pp1)
rx=os.system(ss)
# Delete directory
r={'return':0}
if os.path.isdir(p):
r=delete_directory({'path':p})
if rshared!='':
os.chdir(ppp)
if r['return']>0: return r
# Check if need to delete index
if cfg.get('use_indexing','')=='yes':
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
if o=='con':
out(' Entry '+xcuoa+' was successfully deleted!')
return {'return':0} | Input: {
(repo_uoa) - repo UOA ; can be wild cards
module_uoa - module UOA ; can be wild cards
data_uoa - data UOA ; can be wild cards
(force) - if 'yes', force deleting without questions
or
(f) - to be compatible with rm -f
(share) - if 'yes', try to remove via GIT
(tags) - use these tags in format tags=x,y,z to prune rm
or
(search_string) - prune entries with expression *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
} | Below is the instruction that describes the task:
### Input:
Input: {
(repo_uoa) - repo UOA ; can be wild cards
module_uoa - module UOA ; can be wild cards
data_uoa - data UOA ; can be wild cards
(force) - if 'yes', force deleting without questions
or
(f) - to be compatible with rm -f
(share) - if 'yes', try to remove via GIT
(tags) - use these tags in format tags=x,y,z to prune rm
or
(search_string) - prune entries with expression *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
### Response:
def rm(i):
"""
Input: {
(repo_uoa) - repo UOA ; can be wild cards
module_uoa - module UOA ; can be wild cards
data_uoa - data UOA ; can be wild cards
(force) - if 'yes', force deleting without questions
or
(f) - to be compatible with rm -f
(share) - if 'yes', try to remove via GIT
(tags) - use these tags in format tags=x,y,z to prune rm
or
(search_string) - prune entries with expression *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
a=i.get('repo_uoa','')
# Check if global writing is allowed
r=check_writing({'repo_uoa':a, 'delete':'yes'})
if r['return']>0: return r
o=i.get('out','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if duoa=='':
return {'return':1, 'error':'data UOA is not defined'}
lst=[]
tags=i.get('tags','')
ss=i.get('search_string','')
# Check wildcards
if a.find('*')>=0 or a.find('?')>=0 or m.find('*')>=0 or m.find('?')>=0 or duoa.find('*')>=0 or duoa.find('?')>=0:
if tags=='' and ss=='':
r=list_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
else:
r=search({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa, 'tags':tags, 'search_string':ss})
if r['return']>0: return r
lst=r['lst']
else:
# Find path to data
r=find_path_to_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
ruoa=r.get('repo_uoa','')
ruid=r.get('repo_uid','')
muoa=r.get('module_uoa','')
muid=r.get('module_uid','')
duid=r.get('data_uid','')
duoa=r.get('data_alias','')
if duoa=='': duoa=duid
lst.append({'path':p, 'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid': duid})
force=i.get('force','')
if force=='':
force=i.get('f','')
first=True
for ll in lst:
p=ll['path']
pm=os.path.split(p)[0]
muid=ll['module_uid']
muoa=ll['module_uoa']
duid=ll['data_uid']
duoa=ll['data_uoa']
if duoa!=duid: dalias=duoa
else: dalias=''
# Get user-friendly CID
x=muoa+':'+duoa
if o=='con':
# Try to check if has data name (useful for env)
p2=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_info'])
if os.path.isfile(p2):
r2=load_json_file({'json_file':p2})
if r2['return']==0:
x2=r2['dict'].get('data_name','')
if x2!='':
x='"'+x2+'"\n '+x
xcuoa=x+' ('+muid+':'+duid+')'
# Check repo/module writing
ii={'module_uoa':m, 'repo_uoa':ll['repo_uoa'], 'repo_uid':ll['repo_uid']}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
shr=i.get('share','')
if shr=='yes':
rshared='git'
rsync='yes'
# If interactive
to_delete=True
if o=='con' and force!='yes':
r=inp({'text':'Are you sure to delete CK entry '+xcuoa+' ? (y/N): '})
c=r['string'].lower()
if c!='y' and c!='yes': to_delete=False
# If deleting
if to_delete:
# First remove alias if exists
if dalias!='':
# Delete alias
r=delete_alias({'path':pm, 'data_alias':dalias, 'data_uid':duid, 'repo_dict':rd, 'share':shr})
if r['return']>0: return r
if rshared!='':
pp=os.path.split(p)
pp0=pp[0]
pp1=pp[1]
ppp=os.getcwd()
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', pp1)
rx=os.system(ss)
# Delete directory
r={'return':0}
if os.path.isdir(p):
r=delete_directory({'path':p})
if rshared!='':
os.chdir(ppp)
if r['return']>0: return r
# Check if need to delete index
if cfg.get('use_indexing','')=='yes':
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
if o=='con':
out(' Entry '+xcuoa+' was successfully deleted!')
return {'return':0} |
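A minimal invocation sketch that follows the Input/Output contract documented above; the repo, module, and entry names are placeholders:
r = rm({'repo_uoa': 'local', 'module_uoa': 'experiment', 'data_uoa': 'old-entry-*',
        'tags': 'obsolete', 'force': 'yes', 'out': 'con'})
if r['return'] > 0:
    print('rm failed: ' + r.get('error', ''))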
def parse_operations(ops_string):
"""Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
"""
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create operation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result | Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays | Below is the instruction that describes the task:
### Input:
Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
### Response:
def parse_operations(ops_string):
"""Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
"""
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create operation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result |
def _annotate_groups(self):
"""
Annotate the objects belonging to separate (non-connected) graphs with
individual indices.
"""
g = {}
for x in self.metadata:
g[x.id] = x
idx = 0
for x in self.metadata:
if not hasattr(x, 'group'):
x.group = idx
idx += 1
neighbors = set()
for e in self.edges:
if e.src == x.id:
neighbors.add(e.dst)
if e.dst == x.id:
neighbors.add(e.src)
for nb in neighbors:
g[nb].group = min(x.group, getattr(g[nb], 'group', idx))
# Assign the edges to the respective groups. Both "ends" of the edge
# should share the same group so just use the first object's group.
for e in self.edges:
e.group = g[e.src].group
self._max_group = idx | Annotate the objects belonging to separate (non-connected) graphs with
individual indices. | Below is the instruction that describes the task:
### Input:
Annotate the objects belonging to separate (non-connected) graphs with
individual indices.
### Response:
def _annotate_groups(self):
"""
Annotate the objects belonging to separate (non-connected) graphs with
individual indices.
"""
g = {}
for x in self.metadata:
g[x.id] = x
idx = 0
for x in self.metadata:
if not hasattr(x, 'group'):
x.group = idx
idx += 1
neighbors = set()
for e in self.edges:
if e.src == x.id:
neighbors.add(e.dst)
if e.dst == x.id:
neighbors.add(e.src)
for nb in neighbors:
g[nb].group = min(x.group, getattr(g[nb], 'group', idx))
# Assign the edges to the respective groups. Both "ends" of the edge
# should share the same group so just use the first object's group.
for e in self.edges:
e.group = g[e.src].group
self._max_group = idx |
def update(self, distributor_id, grade_id, session, trade_type=None):
'''taobao.fenxiao.cooperation.update: update the cooperation relationship grade
The supplier updates the grade of a cooperating distributor'''
request = TOPRequest('taobao.fenxiao.cooperation.update')
request['distributor_id'] = distributor_id
request['grade_id'] = grade_id
if trade_type!=None: request['trade_type'] = trade_type
self.create(self.execute(request, session), fields=['is_success'])
return self.is_success | taobao.fenxiao.cooperation.update: update the cooperation relationship grade
The supplier updates the grade of a cooperating distributor | Below is the instruction that describes the task:
### Input:
taobao.fenxiao.cooperation.update: update the cooperation relationship grade
The supplier updates the grade of a cooperating distributor
### Response:
def update(self, distributor_id, grade_id, session, trade_type=None):
'''taobao.fenxiao.cooperation.update: update the cooperation relationship grade
The supplier updates the grade of a cooperating distributor'''
request = TOPRequest('taobao.fenxiao.cooperation.update')
request['distributor_id'] = distributor_id
request['grade_id'] = grade_id
if trade_type!=None: request['trade_type'] = trade_type
self.create(self.execute(request, session), fields=['is_success'])
return self.is_success |
def execute_prebuild_script(self):
"""
Parse and execute the prebuild_script from the zappa_settings.
"""
(pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1)
try: # Prefer prebuild script in working directory
if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder
(mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1)
mod_folder_path_fragments = mod_folder_path.split('.')
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = pb_mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
except (ImportError, AttributeError):
try: # Prebuild func might be in virtualenv
module_ = importlib.import_module(pb_mod_path)
except ImportError: # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format(
pb_mod_path=click.style(pb_mod_path, bold=True)))
if not hasattr(module_, pb_func): # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"find prebuild script ", bold=True) + 'function: "{pb_func}" '.format(
pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format(
pb_mod_path=pb_mod_path))
prebuild_function = getattr(module_, pb_func)
prebuild_function() | Parse and execute the prebuild_script from the zappa_settings. | Below is the instruction that describes the task:
### Input:
Parse and execute the prebuild_script from the zappa_settings.
### Response:
def execute_prebuild_script(self):
"""
Parse and execute the prebuild_script from the zappa_settings.
"""
(pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1)
try: # Prefer prebuild script in working directory
if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder
(mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1)
mod_folder_path_fragments = mod_folder_path.split('.')
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = pb_mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
except (ImportError, AttributeError):
try: # Prebuild func might be in virtualenv
module_ = importlib.import_module(pb_mod_path)
except ImportError: # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format(
pb_mod_path=click.style(pb_mod_path, bold=True)))
if not hasattr(module_, pb_func): # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"find prebuild script ", bold=True) + 'function: "{pb_func}" '.format(
pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format(
pb_mod_path=pb_mod_path))
prebuild_function = getattr(module_, pb_func)
prebuild_function() |
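To illustrate the dotted-path convention the parser above expects, here is a hypothetical layout; the settings value and the module and function names are assumptions for illustration only:
# assumed setting: prebuild_script = "scripts.build.compile_assets"
# scripts/build.py
def compile_assets():
    # runs before the deployment package is assembled
    print("compiling static assets")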
def unconsumed_ranges(self):
"""Return an IntervalTree of unconsumed ranges, of the format
(start, end] with the end value not being included
"""
res = IntervalTree()
prev = None
# normal iteration is not in a predictable order
ranges = sorted([x for x in self.range_set], key=lambda x: x.begin)
for rng in ranges:
if prev is None:
prev = rng
continue
res.add(Interval(prev.end, rng.begin))
prev = rng
# means we've seeked past the end
if len(self.range_set[self.tell()]) != 1:
res.add(Interval(prev.end, self.tell()))
return res | Return an IntervalTree of unconsumed ranges, of the format
(start, end] with the end value not being included | Below is the instruction that describes the task:
### Input:
Return an IntervalTree of unconsumed ranges, of the format
(start, end] with the end value not being included
### Response:
def unconsumed_ranges(self):
"""Return an IntervalTree of unconsumed ranges, of the format
(start, end] with the end value not being included
"""
res = IntervalTree()
prev = None
# normal iteration is not in a predictable order
ranges = sorted([x for x in self.range_set], key=lambda x: x.begin)
for rng in ranges:
if prev is None:
prev = rng
continue
res.add(Interval(prev.end, rng.begin))
prev = rng
# means we've seeked past the end
if len(self.range_set[self.tell()]) != 1:
res.add(Interval(prev.end, self.tell()))
return res |
def can_handle(self, cls):
"""
this will theoretically be compatible with everything,
as cattrs can handle many basic types as well.
"""
# cattrs uses a Singledispatch like function
# under the hood.
f = self._cattrs_converter._structure_func.dispatch(cls)
return f != self._cattrs_converter._structure_default | this will theoretically be compatible with everything,
as cattrs can handle many basic types as well. | Below is the instruction that describes the task:
### Input:
this will theoretically be compatible with everything,
as cattrs can handle many basic types as well.
### Response:
def can_handle(self, cls):
"""
this will theoretically be compatible with everything,
as cattrs can handle many basic types as well.
"""
# cattrs uses a Singledispatch like function
# under the hood.
f = self._cattrs_converter._structure_func.dispatch(cls)
return f != self._cattrs_converter._structure_default |
def DownloadReportToFile(self, report_job_id, export_format, outfile,
include_report_properties=False,
include_totals_row=None, use_gzip_compression=True):
"""Downloads report data and writes it to a file.
The report job must be completed before calling this function.
Args:
report_job_id: The ID of the report job to wait for, as a string.
export_format: The export format for the report file, as a string.
outfile: A writeable, file-like object to write to.
include_report_properties: Whether or not to include the report
properties (e.g. network, user, date generated...)
in the generated report.
include_totals_row: Whether or not to include the totals row.
use_gzip_compression: Whether or not to use gzip compression.
"""
service = self._GetReportService()
if include_totals_row is None: # True unless CSV export if not specified
include_totals_row = True if export_format != 'CSV_DUMP' else False
opts = {
'exportFormat': export_format,
'includeReportProperties': include_report_properties,
'includeTotalsRow': include_totals_row,
'useGzipCompression': use_gzip_compression
}
report_url = service.getReportDownloadUrlWithOptions(report_job_id, opts)
_data_downloader_logger.info('Request Summary: Report job ID: %s, %s',
report_job_id, opts)
response = self.url_opener.open(report_url)
_data_downloader_logger.debug(
'Incoming response: %s %s REDACTED REPORT DATA', response.code,
response.msg)
while True:
chunk = response.read(_CHUNK_SIZE)
if not chunk: break
outfile.write(chunk) | Downloads report data and writes it to a file.
The report job must be completed before calling this function.
Args:
report_job_id: The ID of the report job to wait for, as a string.
export_format: The export format for the report file, as a string.
outfile: A writeable, file-like object to write to.
include_report_properties: Whether or not to include the report
properties (e.g. network, user, date generated...)
in the generated report.
include_totals_row: Whether or not to include the totals row.
use_gzip_compression: Whether or not to use gzip compression. | Below is the instruction that describes the task:
### Input:
Downloads report data and writes it to a file.
The report job must be completed before calling this function.
Args:
report_job_id: The ID of the report job to wait for, as a string.
export_format: The export format for the report file, as a string.
outfile: A writeable, file-like object to write to.
include_report_properties: Whether or not to include the report
properties (e.g. network, user, date generated...)
in the generated report.
include_totals_row: Whether or not to include the totals row.
use_gzip_compression: Whether or not to use gzip compression.
### Response:
def DownloadReportToFile(self, report_job_id, export_format, outfile,
include_report_properties=False,
include_totals_row=None, use_gzip_compression=True):
"""Downloads report data and writes it to a file.
The report job must be completed before calling this function.
Args:
report_job_id: The ID of the report job to wait for, as a string.
export_format: The export format for the report file, as a string.
outfile: A writeable, file-like object to write to.
include_report_properties: Whether or not to include the report
properties (e.g. network, user, date generated...)
in the generated report.
include_totals_row: Whether or not to include the totals row.
use_gzip_compression: Whether or not to use gzip compression.
"""
service = self._GetReportService()
if include_totals_row is None: # True unless CSV export if not specified
include_totals_row = True if export_format != 'CSV_DUMP' else False
opts = {
'exportFormat': export_format,
'includeReportProperties': include_report_properties,
'includeTotalsRow': include_totals_row,
'useGzipCompression': use_gzip_compression
}
report_url = service.getReportDownloadUrlWithOptions(report_job_id, opts)
_data_downloader_logger.info('Request Summary: Report job ID: %s, %s',
report_job_id, opts)
response = self.url_opener.open(report_url)
_data_downloader_logger.debug(
'Incoming response: %s %s REDACTED REPORT DATA', response.code,
response.msg)
while True:
chunk = response.read(_CHUNK_SIZE)
if not chunk: break
outfile.write(chunk) |
def to_fmt(self) -> str:
"""
Provide a useful representation of the register.
"""
infos = fmt.end(";\n", [])
s = fmt.sep(', ', [])
for ids in sorted(self.states.keys()):
s.lsdata.append(str(ids))
infos.lsdata.append(fmt.block('(', ')', [s]))
infos.lsdata.append("events:" + repr(self.events))
infos.lsdata.append(
"named_events:" + repr(list(self.named_events.keys()))
)
infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
return infos | Provide a useful representation of the register. | Below is the instruction that describes the task:
### Input:
Provide a useful representation of the register.
### Response:
def to_fmt(self) -> str:
"""
Provide a useful representation of the register.
"""
infos = fmt.end(";\n", [])
s = fmt.sep(', ', [])
for ids in sorted(self.states.keys()):
s.lsdata.append(str(ids))
infos.lsdata.append(fmt.block('(', ')', [s]))
infos.lsdata.append("events:" + repr(self.events))
infos.lsdata.append(
"named_events:" + repr(list(self.named_events.keys()))
)
infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
return infos |
def flow(self)->FlowField:
"Access the flow-field grid after applying queued affine and coord transforms."
if self._affine_mat is not None:
self._flow = _affine_inv_mult(self._flow, self._affine_mat)
self._affine_mat = None
self.transformed = True
if len(self.flow_func) != 0:
for f in self.flow_func[::-1]: self._flow = f(self._flow)
self.transformed = True
self.flow_func = []
return self._flow | Access the flow-field grid after applying queued affine and coord transforms. | Below is the instruction that describes the task:
### Input:
Access the flow-field grid after applying queued affine and coord transforms.
### Response:
def flow(self)->FlowField:
"Access the flow-field grid after applying queued affine and coord transforms."
if self._affine_mat is not None:
self._flow = _affine_inv_mult(self._flow, self._affine_mat)
self._affine_mat = None
self.transformed = True
if len(self.flow_func) != 0:
for f in self.flow_func[::-1]: self._flow = f(self._flow)
self.transformed = True
self.flow_func = []
return self._flow |
def getFilepaths(self, filename):
"""
Get home and mackup filepaths for given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str)
"""
return (os.path.join(os.environ['HOME'], filename),
os.path.join(self.mackup.mackup_folder, filename)) | Get home and mackup filepaths for given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str) | Below is the instruction that describes the task:
### Input:
Get home and mackup filepaths for given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str)
### Response:
def getFilepaths(self, filename):
"""
Get home and mackup filepaths for given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str)
"""
return (os.path.join(os.environ['HOME'], filename),
os.path.join(self.mackup.mackup_folder, filename)) |
def syslog(server, enable=True):
'''
Configure syslog remote logging, by default syslog will automatically be
enabled if a server is specified. However, if you want to disable syslog
you will need to specify a server followed by False
CLI Example:
.. code-block:: bash
salt dell drac.syslog [SYSLOG IP] [ENABLE/DISABLE]
salt dell drac.syslog 0.0.0.0 False
'''
if enable and __execute_cmd('config -g cfgRemoteHosts -o \
cfgRhostsSyslogEnable 1'):
return __execute_cmd('config -g cfgRemoteHosts -o \
cfgRhostsSyslogServer1 {0}'.format(server))
return __execute_cmd('config -g cfgRemoteHosts -o cfgRhostsSyslogEnable 0') | Configure syslog remote logging, by default syslog will automatically be
enabled if a server is specified. However, if you want to disable syslog
you will need to specify a server followed by False
CLI Example:
.. code-block:: bash
salt dell drac.syslog [SYSLOG IP] [ENABLE/DISABLE]
salt dell drac.syslog 0.0.0.0 False | Below is the instruction that describes the task:
### Input:
Configure syslog remote logging, by default syslog will automatically be
enabled if a server is specified. However, if you want to disable syslog
you will need to specify a server followed by False
CLI Example:
.. code-block:: bash
salt dell drac.syslog [SYSLOG IP] [ENABLE/DISABLE]
salt dell drac.syslog 0.0.0.0 False
### Response:
def syslog(server, enable=True):
'''
Configure syslog remote logging, by default syslog will automatically be
enabled if a server is specified. However, if you want to disable syslog
you will need to specify a server followed by False
CLI Example:
.. code-block:: bash
salt dell drac.syslog [SYSLOG IP] [ENABLE/DISABLE]
salt dell drac.syslog 0.0.0.0 False
'''
if enable and __execute_cmd('config -g cfgRemoteHosts -o \
cfgRhostsSyslogEnable 1'):
return __execute_cmd('config -g cfgRemoteHosts -o \
cfgRhostsSyslogServer1 {0}'.format(server))
return __execute_cmd('config -g cfgRemoteHosts -o cfgRhostsSyslogEnable 0') |
def sents(self: object, fileids: str):
"""
Tokenizes documents in the corpus by sentence
"""
for para in self.paras(fileids):
for sent in sent_tokenize(para):
yield sent | Tokenizes documents in the corpus by sentence | Below is the instruction that describes the task:
### Input:
Tokenizes documents in the corpus by sentence
### Response:
def sents(self: object, fileids: str):
"""
Tokenizes documents in the corpus by sentence
"""
for para in self.paras(fileids):
for sent in sent_tokenize(para):
yield sent |
def get_range_info(array, component):
"""Get the data range of the array's component"""
r = array.GetRange(component)
comp_range = {}
comp_range['min'] = r[0]
comp_range['max'] = r[1]
comp_range['component'] = array.GetComponentName(component)
return comp_range | Get the data range of the array's component | Below is the instruction that describes the task:
### Input:
Get the data range of the array's component
### Response:
def get_range_info(array, component):
"""Get the data range of the array's component"""
r = array.GetRange(component)
comp_range = {}
comp_range['min'] = r[0]
comp_range['max'] = r[1]
comp_range['component'] = array.GetComponentName(component)
return comp_range |
def call(self, methodname, *args, **kwargs):
"""Call a common method on all the plugins, if it exists."""
for plugin in self._plugins:
method = getattr(plugin, methodname, None)
if method is None:
continue
yield method(*args, **kwargs) | Call a common method on all the plugins, if it exists. | Below is the instruction that describes the task:
### Input:
Call a common method on all the plugins, if it exists.
### Response:
def call(self, methodname, *args, **kwargs):
"""Call a common method on all the plugins, if it exists."""
for plugin in self._plugins:
method = getattr(plugin, methodname, None)
if method is None:
continue
yield method(*args, **kwargs) |
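A small sketch of the dispatch behavior, using a hypothetical host class that reuses the call method shown above and two toy plugins:
class Greeter:
    def on_start(self, name):
        return "hello " + name

class Silent:
    pass  # defines no on_start, so it is silently skipped

class PluginHost:
    # hypothetical container; only the _plugins attribute and call matter here
    def __init__(self, plugins):
        self._plugins = plugins
    call = call  # reuse the generator method defined above

host = PluginHost([Greeter(), Silent()])
print(list(host.call("on_start", "world")))  # -> ['hello world']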
def unpack_version(version):
"""Unpack a single version integer into the two major and minor
components."""
minor_version = version % VERSION_MULTIPLIER
major_version = (version - minor_version) / VERSION_MULTIPLIER
return (major_version, minor_version) | Unpack a single version integer into the two major and minor
components. | Below is the instruction that describes the task:
### Input:
Unpack a single version integer into the two major and minor
components.
### Response:
def unpack_version(version):
"""Unpack a single version integer into the two major and minor
components."""
minor_version = version % VERSION_MULTIPLIER
major_version = (version - minor_version) / VERSION_MULTIPLIER
return (major_version, minor_version) |
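A worked example of the packing arithmetic; the VERSION_MULTIPLIER value and the pack_version helper are assumptions for illustration, since the real constant is defined elsewhere in the module:
VERSION_MULTIPLIER = 1000  # assumed value, purely for illustration

def pack_version(major, minor):
    # inverse of unpack_version: major fills the high slot, minor the low one
    return major * VERSION_MULTIPLIER + minor

packed = pack_version(2, 7)    # 2007
print(unpack_version(packed))  # (2.0, 7) -- note the true division in unpack_version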
def translateprotocolmask(protocol):
"""Translate CardConnection protocol mask into PCSC protocol mask."""
pcscprotocol = 0
if None != protocol:
if CardConnection.T0_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_T0
if CardConnection.T1_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_T1
if CardConnection.RAW_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_RAW
if CardConnection.T15_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_T15
return pcscprotocol | Translate CardConnection protocol mask into PCSC protocol mask. | Below is the instruction that describes the task:
### Input:
Translate CardConnection protocol mask into PCSC protocol mask.
### Response:
def translateprotocolmask(protocol):
"""Translate CardConnection protocol mask into PCSC protocol mask."""
pcscprotocol = 0
if None != protocol:
if CardConnection.T0_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_T0
if CardConnection.T1_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_T1
if CardConnection.RAW_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_RAW
if CardConnection.T15_protocol & protocol:
pcscprotocol |= SCARD_PROTOCOL_T15
return pcscprotocol |
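A short sketch of how the translated mask composes, assuming the CardConnection class and SCARD_* constants referenced above are importable (for example from pyscard):
mask = translateprotocolmask(CardConnection.T0_protocol | CardConnection.T1_protocol)
assert mask & SCARD_PROTOCOL_T0   # both PCSC bits are now set, so the mask can be
assert mask & SCARD_PROTOCOL_T1   # handed straight to the low-level connect call
assert translateprotocolmask(None) == 0  # None yields an empty mask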
def change_object_content_type(self, container, obj, new_ctype,
guess=False, extra_info=None):
"""
Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in a swift exception.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.change_object_content_type(container, obj,
new_ctype, guess=guess) | Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in a swift exception.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more. | Below is the instruction that describes the task:
### Input:
Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in a swift exception.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
### Response:
def change_object_content_type(self, container, obj, new_ctype,
guess=False, extra_info=None):
"""
Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in a swift exception.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.change_object_content_type(container, obj,
new_ctype, guess=guess) |
def add_vnic_template(self, vlan_id, ucsm_ip, vnic_template, physnet):
"""Adds an entry for a vlan_id on a SP template to the table."""
if not self.get_vnic_template_vlan_entry(vlan_id, vnic_template,
ucsm_ip, physnet):
vnic_t = ucsm_model.VnicTemplate(vlan_id=vlan_id,
vnic_template=vnic_template,
device_id=ucsm_ip,
physnet=physnet,
updated_on_ucs=False)
with self.session.begin(subtransactions=True):
self.session.add(vnic_t)
return vnic_t | Adds an entry for a vlan_id on a SP template to the table. | Below is the instruction that describes the task:
### Input:
Adds an entry for a vlan_id on a SP template to the table.
### Response:
def add_vnic_template(self, vlan_id, ucsm_ip, vnic_template, physnet):
"""Adds an entry for a vlan_id on a SP template to the table."""
if not self.get_vnic_template_vlan_entry(vlan_id, vnic_template,
ucsm_ip, physnet):
vnic_t = ucsm_model.VnicTemplate(vlan_id=vlan_id,
vnic_template=vnic_template,
device_id=ucsm_ip,
physnet=physnet,
updated_on_ucs=False)
with self.session.begin(subtransactions=True):
self.session.add(vnic_t)
return vnic_t |
def _iter_candidate_groups(self, init_match, edges0, edges1):
"""Divide the edges into groups"""
# collect all end vertices0 and end vertices1 that belong to the same
# group.
sources = {}
for start_vertex0, end_vertex0 in edges0:
l = sources.setdefault(start_vertex0, [])
l.append(end_vertex0)
dests = {}
for start_vertex1, end_vertex1 in edges1:
start_vertex0 = init_match.reverse[start_vertex1]
l = dests.setdefault(start_vertex0, [])
l.append(end_vertex1)
for start_vertex0, end_vertices0 in sources.items():
end_vertices1 = dests.get(start_vertex0, [])
yield end_vertices0, end_vertices1 | Divide the edges into groups | Below is the instruction that describes the task:
### Input:
Divide the edges into groups
### Response:
def _iter_candidate_groups(self, init_match, edges0, edges1):
"""Divide the edges into groups"""
# collect all end vertices0 and end vertices1 that belong to the same
# group.
sources = {}
for start_vertex0, end_vertex0 in edges0:
l = sources.setdefault(start_vertex0, [])
l.append(end_vertex0)
dests = {}
for start_vertex1, end_vertex1 in edges1:
start_vertex0 = init_match.reverse[start_vertex1]
l = dests.setdefault(start_vertex0, [])
l.append(end_vertex1)
for start_vertex0, end_vertices0 in sources.items():
end_vertices1 = dests.get(start_vertex0, [])
yield end_vertices0, end_vertices1 |
def open_connection(ip, username, password, function, args, write=False,
conn_timeout=5, sess_timeout=300, port=22):
""" Open a Jaide session with the device.
To open a Jaide session to the device, and run the appropriate function
against the device. Arguments for the downstream function are passed
through.
@param ip: String of the IP or hostname of the device to connect to.
@type ip: str
@param username: The string username used to connect to the device.
@type username: str
@param password: The string password used to connect to the device.
@type password: str
@param function: The downstream jaide.wrap function we'll be handing
| off the jaide.Jaide() object to execute the command
| once we've established the connection.
@type function: function pointer.
@param args: The arguments that we will hand off to the downstream
| function.
@type args: list
@param write: If set, it would be a tuple that we pass back as part of
| our return statement, so that any callback function
| can know how and where to put the output from the device.
@type write: False or tuple.
@param conn_timeout: Sets the connection timeout value. This is how
| we'll wait when connecting before classifying
| the device unreachable.
@type conn_timeout: int
@param sess_timeout: Sets the session timeout value. A higher value may
| be desired for long running commands, such as
| 'request system snapshot slice alternate'
@type sess_timeout: int
@param port: The port to connect to the device on. Defaults to 22.
@type port: int
@returns: We could return either just a string of the output from the
| device, or a tuple containing the information needed to write
| to a file and the string output from the device.
@rtype: Tuple or str
"""
# start with the header line on the output.
output = color('=' * 50 + '\nResults from device: %s\n' % ip, 'yel')
try:
# create the Jaide session object for the device.
conn = Jaide(ip, username, password, connect_timeout=conn_timeout,
session_timeout=sess_timeout, port=port)
if write is not False:
return write, output + function(conn, *args)
else:
return output + function(conn, *args)
except errors.SSHError:
output += color('Unable to connect to port %s on device: %s\n' %
(str(port), ip), 'red')
except errors.AuthenticationError: # NCClient auth failure
output += color('Authentication failed for device: %s' % ip, 'red')
except AuthenticationException: # Paramiko auth failure
output += color('Authentication failed for device: %s' % ip, 'red')
except SSHException as e:
output += color('Error connecting to device: %s\nError: %s' %
(ip, str(e)), 'red')
except socket.timeout:
output += color('Timeout exceeded connecting to device: %s' % ip, 'red')
except socket.gaierror:
output += color('No route to host, or invalid hostname: %s' % ip, 'red')
except socket.error:
output += color('The device refused the connection on port %s, or '
'no route to host.' % port, 'red')
if write is not False:
return write, output
else:
return output | Open a Jaide session with the device.
To open a Jaide session to the device, and run the appropriate function
against the device. Arguments for the downstream function are passed
through.
@param ip: String of the IP or hostname of the device to connect to.
@type ip: str
@param username: The string username used to connect to the device.
@type useranme: str
@param password: The string password used to connect to the device.
@type password: str
@param function: The downstream jaide.wrap function we'll be handing
| off the jaide.Jaide() object to execute the command
| once we've established the connection.
@type function: function pointer.
@param args: The arguments that we will hand off to the downstream
| function.
@type args: list
@param write: If set, it would be a tuple that we pass back as part of
| our return statement, so that any callback function
| can know how and where to put the output from the device.
@type write: False or tuple.
@param conn_timeout: Sets the connection timeout value. This is how
| we'll wait when connecting before classifying
| the device unreachable.
@type conn_timeout: int
@param sess_timeout: Sets the session timeout value. A higher value may
| be desired for long running commands, such as
| 'request system snapshot slice alternate'
@type sess_timeout: int
@param port: The port to connect to the device on. Defaults to 22.
@type port: int
@returns: We could return either just a string of the output from the
| device, or a tuple containing the information needed to write
| to a file and the string output from the device.
@rtype: Tuple or str | Below is the instruction that describes the task:
### Input:
Open a Jaide session with the device.
To open a Jaide session to the device, and run the appropriate function
against the device. Arguments for the downstream function are passed
through.
@param ip: String of the IP or hostname of the device to connect to.
@type ip: str
@param username: The string username used to connect to the device.
@type username: str
@param password: The string password used to connect to the device.
@type password: str
@param function: The downstream jaide.wrap function we'll be handing
| off the jaide.Jaide() object to execute the command
| once we've established the connection.
@type function: function pointer.
@param args: The arguments that we will hand off to the downstream
| function.
@type args: list
@param write: If set, it would be a tuple that we pass back as part of
| our return statement, so that any callback function
| can know how and where to put the output from the device.
@type write: False or tuple.
@param conn_timeout: Sets the connection timeout value. This is how
| we'll wait when connecting before classifying
| the device unreachable.
@type conn_timeout: int
@param sess_timeout: Sets the session timeout value. A higher value may
| be desired for long running commands, such as
| 'request system snapshot slice alternate'
@type sess_timeout: int
@param port: The port to connect to the device on. Defaults to 22.
@type port: int
@returns: We could return either just a string of the output from the
| device, or a tuple containing the information needed to write
| to a file and the string output from the device.
@rtype: Tuple or str
### Response:
def open_connection(ip, username, password, function, args, write=False,
conn_timeout=5, sess_timeout=300, port=22):
""" Open a Jaide session with the device.
To open a Jaide session to the device, and run the appropriate function
against the device. Arguments for the downstream function are passed
through.
@param ip: String of the IP or hostname of the device to connect to.
@type ip: str
@param username: The string username used to connect to the device.
@type username: str
@param password: The string password used to connect to the device.
@type password: str
@param function: The downstream jaide.wrap function we'll be handing
| off the jaide.Jaide() object to execute the command
| once we've established the connection.
@type function: function pointer.
@param args: The arguments that we will hand off to the downstream
| function.
@type args: list
@param write: If set, it would be a tuple that we pass back as part of
| our return statement, so that any callback function
| can know how and where to put the output from the device.
@type write: False or tuple.
@param conn_timeout: Sets the connection timeout value. This is how
| we'll wait when connecting before classifying
| the device unreachable.
@type conn_timeout: int
@param sess_timeout: Sets the session timeout value. A higher value may
| be desired for long running commands, such as
| 'request system snapshot slice alternate'
@type sess_timeout: int
@param port: The port to connect to the device on. Defaults to 22.
@type port: int
@returns: We could return either just a string of the output from the
| device, or a tuple containing the information needed to write
| to a file and the string output from the device.
@rtype: Tuple or str
"""
# start with the header line on the output.
output = color('=' * 50 + '\nResults from device: %s\n' % ip, 'yel')
try:
# create the Jaide session object for the device.
conn = Jaide(ip, username, password, connect_timeout=conn_timeout,
session_timeout=sess_timeout, port=port)
if write is not False:
return write, output + function(conn, *args)
else:
return output + function(conn, *args)
except errors.SSHError:
output += color('Unable to connect to port %s on device: %s\n' %
(str(port), ip), 'red')
except errors.AuthenticationError: # NCClient auth failure
output += color('Authentication failed for device: %s' % ip, 'red')
except AuthenticationException: # Paramiko auth failure
output += color('Authentication failed for device: %s' % ip, 'red')
except SSHException as e:
output += color('Error connecting to device: %s\nError: %s' %
(ip, str(e)), 'red')
except socket.timeout:
output += color('Timeout exceeded connecting to device: %s' % ip, 'red')
except socket.gaierror:
output += color('No route to host, or invalid hostname: %s' % ip, 'red')
except socket.error:
output += color('The device refused the connection on port %s, or '
'no route to host.' % port, 'red')
if write is not False:
return write, output
else:
return output |
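A minimal usage sketch for the open_connection record above. The wrap.command downstream function and its argument list are assumptions made only for illustration; the real jaide.wrap API may expose different names and signatures.
from jaide import wrap  # assumed import path for the downstream wrap functions

# Run one operational command against a single device and print whatever comes back.
result = open_connection('10.0.0.1', 'admin', 'secret',
                         function=wrap.command,    # assumed wrap function name
                         args=['show version'],    # handed straight to that function
                         conn_timeout=5, sess_timeout=300, port=22)
print(result)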
def __build_sign_query(saml_data, relay_state, algorithm, saml_type, lowercase_urlencoding=False):
"""
Build sign query
:param saml_data: The Request data
:type saml_data: str
:param relay_state: The Relay State
:type relay_state: str
:param algorithm: The Signature Algorithm
:type algorithm: str
:param saml_type: The target URL the user should be redirected to
:type saml_type: string SAMLRequest | SAMLResponse
:param lowercase_urlencoding: lowercase or no
:type lowercase_urlencoding: boolean
"""
sign_data = ['%s=%s' % (saml_type, OneLogin_Saml2_Utils.escape_url(saml_data, lowercase_urlencoding))]
if relay_state is not None:
sign_data.append('RelayState=%s' % OneLogin_Saml2_Utils.escape_url(relay_state, lowercase_urlencoding))
sign_data.append('SigAlg=%s' % OneLogin_Saml2_Utils.escape_url(algorithm, lowercase_urlencoding))
return '&'.join(sign_data) | Build sign query
:param saml_data: The Request data
:type saml_data: str
:param relay_state: The Relay State
:type relay_state: str
:param algorithm: The Signature Algorithm
:type algorithm: str
:param saml_type: The target URL the user should be redirected to
:type saml_type: string SAMLRequest | SAMLResponse
:param lowercase_urlencoding: lowercase or no
:type lowercase_urlencoding: boolean | Below is the instruction that describes the task:
### Input:
Build sign query
:param saml_data: The Request data
:type saml_data: str
:param relay_state: The Relay State
:type relay_state: str
:param algorithm: The Signature Algorithm
:type algorithm: str
:param saml_type: The target URL the user should be redirected to
:type saml_type: string SAMLRequest | SAMLResponse
:param lowercase_urlencoding: lowercase or no
:type lowercase_urlencoding: boolean
### Response:
def __build_sign_query(saml_data, relay_state, algorithm, saml_type, lowercase_urlencoding=False):
"""
Build sign query
:param saml_data: The Request data
:type saml_data: str
:param relay_state: The Relay State
:type relay_state: str
:param algorithm: The Signature Algorithm
:type algorithm: str
:param saml_type: The target URL the user should be redirected to
:type saml_type: string SAMLRequest | SAMLResponse
:param lowercase_urlencoding: lowercase or no
:type lowercase_urlencoding: boolean
"""
sign_data = ['%s=%s' % (saml_type, OneLogin_Saml2_Utils.escape_url(saml_data, lowercase_urlencoding))]
if relay_state is not None:
sign_data.append('RelayState=%s' % OneLogin_Saml2_Utils.escape_url(relay_state, lowercase_urlencoding))
sign_data.append('SigAlg=%s' % OneLogin_Saml2_Utils.escape_url(algorithm, lowercase_urlencoding))
return '&'.join(sign_data) |
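A self-contained sketch of the same concatenation, using urllib.parse.quote_plus as a stand-in for OneLogin_Saml2_Utils.escape_url (an assumption; the real escaper also honours the lowercase_urlencoding flag):
from urllib.parse import quote_plus

def build_sign_query_sketch(saml_data, relay_state, algorithm, saml_type='SAMLRequest'):
    # Same ordering as above: SAMLRequest/SAMLResponse first, then RelayState, then SigAlg.
    sign_data = ['%s=%s' % (saml_type, quote_plus(saml_data))]
    if relay_state is not None:
        sign_data.append('RelayState=%s' % quote_plus(relay_state))
    sign_data.append('SigAlg=%s' % quote_plus(algorithm))
    return '&'.join(sign_data)

print(build_sign_query_sketch('PHNhbWxwOk...', 'https://sp.example.com/acs',
                              'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256'))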
def get_sequence_rules(self):
"""Gets all ``SequenceRules``.
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.SequenceRuleList(result, runtime=self._runtime, proxy=self._proxy) | Gets all ``SequenceRules``.
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets all ``SequenceRules``.
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_sequence_rules(self):
"""Gets all ``SequenceRules``.
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.SequenceRuleList(result, runtime=self._runtime, proxy=self._proxy) |
def resolve(self, requirement_set):
# type: (RequirementSet) -> None
"""Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
"""
# make the wheelhouse
if self.preparer.wheel_download_dir:
ensure_dir(self.preparer.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = (
requirement_set.unnamed_requirements +
list(requirement_set.requirements.values())
)
self.require_hashes = (
requirement_set.require_hashes or
any(req.has_hash_options for req in root_reqs)
)
# Display where finder is looking for packages
locations = self.finder.get_formatted_locations()
if locations:
logger.info(locations)
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = [] # type: List[InstallRequirement]
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(
self._resolve_one(requirement_set, req)
)
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors | Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution. | Below is the instruction that describes the task:
### Input:
Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
### Response:
def resolve(self, requirement_set):
# type: (RequirementSet) -> None
"""Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
"""
# make the wheelhouse
if self.preparer.wheel_download_dir:
ensure_dir(self.preparer.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = (
requirement_set.unnamed_requirements +
list(requirement_set.requirements.values())
)
self.require_hashes = (
requirement_set.require_hashes or
any(req.has_hash_options for req in root_reqs)
)
# Display where finder is looking for packages
locations = self.finder.get_formatted_locations()
if locations:
logger.info(locations)
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = [] # type: List[InstallRequirement]
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(
self._resolve_one(requirement_set, req)
)
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors |
def _filter_vcf(out_file):
"""Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference.
"""
in_file = out_file.replace(".vcf", "-ori.vcf")
FILTER_line = ('##FILTER=<ID=SBIAS,Description="Due to bias">\n'
'##FILTER=<ID=5BP,Description="Due to 5BP">\n'
'##FILTER=<ID=REJECT,Description="Not somatic due to qSNP filters">\n')
SOMATIC_line = '##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="somatic event">\n'
if not utils.file_exists(in_file):
shutil.move(out_file, in_file)
with file_transaction(out_file) as tx_out_file:
with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("##normalSample="):
normal_name = line.strip().split("=")[1]
if line.startswith("##patient_id="):
tumor_name = line.strip().split("=")[1]
if line.startswith("#CHROM"):
line = line.replace("Normal", normal_name)
line = line.replace("Tumour", tumor_name)
if line.startswith("##INFO=<ID=FS"):
line = line.replace("ID=FS", "ID=RNT")
if line.find("FS=") > -1:
line = line.replace("FS=", "RNT=")
if "5BP" in line:
line = sub("5BP[0-9]+", "5BP", line)
if line.find("PASS") == -1:
line = _set_reject(line)
if line.find("PASS") > - 1 and line.find("SOMATIC") == -1:
line = _set_reject(line)
if not _has_ambiguous_ref_allele(line):
out_handle.write(line)
if line.startswith("##FILTER") and FILTER_line:
out_handle.write("%s" % FILTER_line)
FILTER_line = ""
if line.startswith("##INFO") and SOMATIC_line:
out_handle.write("%s" % SOMATIC_line)
SOMATIC_line = ""
return out_file | Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference. | Below is the instruction that describes the task:
### Input:
Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference.
### Response:
def _filter_vcf(out_file):
"""Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference.
"""
in_file = out_file.replace(".vcf", "-ori.vcf")
FILTER_line = ('##FILTER=<ID=SBIAS,Description="Due to bias">\n'
'##FILTER=<ID=5BP,Description="Due to 5BP">\n'
'##FILTER=<ID=REJECT,Description="Not somatic due to qSNP filters">\n')
SOMATIC_line = '##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="somatic event">\n'
if not utils.file_exists(in_file):
shutil.move(out_file, in_file)
with file_transaction(out_file) as tx_out_file:
with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("##normalSample="):
normal_name = line.strip().split("=")[1]
if line.startswith("##patient_id="):
tumor_name = line.strip().split("=")[1]
if line.startswith("#CHROM"):
line = line.replace("Normal", normal_name)
line = line.replace("Tumour", tumor_name)
if line.startswith("##INFO=<ID=FS"):
line = line.replace("ID=FS", "ID=RNT")
if line.find("FS=") > -1:
line = line.replace("FS=", "RNT=")
if "5BP" in line:
line = sub("5BP[0-9]+", "5BP", line)
if line.find("PASS") == -1:
line = _set_reject(line)
if line.find("PASS") > - 1 and line.find("SOMATIC") == -1:
line = _set_reject(line)
if not _has_ambiguous_ref_allele(line):
out_handle.write(line)
if line.startswith("##FILTER") and FILTER_line:
out_handle.write("%s" % FILTER_line)
FILTER_line = ""
if line.startswith("##INFO") and SOMATIC_line:
out_handle.write("%s" % SOMATIC_line)
SOMATIC_line = ""
return out_file |
def bindings_exist(name, jboss_config, bindings, profile=None):
'''
Ensures that the given JNDI bindings are present on the server.
If a binding doesn't exist on the server it will be created.
If it already exists its value will be changed.
jboss_config:
Dict with connection properties (see state description)
bindings:
Dict with bindings to set.
profile:
The profile name (domain mode only)
Example:
.. code-block:: yaml
jndi_entries_created:
jboss7.bindings_exist:
- bindings:
'java:global/sampleapp/environment': 'DEV'
'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties'
- jboss_config: {{ pillar['jboss'] }}
'''
log.debug(" ======================== STATE: jboss7.bindings_exist (name: %s) (profile: %s) ", name, profile)
log.debug('bindings=%s', bindings)
ret = {'name': name,
'result': True,
'changes': {},
'comment': 'Bindings not changed.'}
has_changed = False
for key in bindings:
value = six.text_type(bindings[key])
query_result = __salt__['jboss7.read_simple_binding'](binding_name=key, jboss_config=jboss_config, profile=profile)
if query_result['success']:
current_value = query_result['result']['value']
if current_value != value:
update_result = __salt__['jboss7.update_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile)
if update_result['success']:
has_changed = True
__log_binding_change(ret['changes'], 'changed', key, value, current_value)
else:
raise CommandExecutionError(update_result['failure-description'])
else:
if query_result['err_code'] == 'JBAS014807': # ok, resource not exists:
create_result = __salt__['jboss7.create_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile)
if create_result['success']:
has_changed = True
__log_binding_change(ret['changes'], 'added', key, value)
else:
raise CommandExecutionError(create_result['failure-description'])
else:
raise CommandExecutionError(query_result['failure-description'])
if has_changed:
ret['comment'] = 'Bindings changed.'
return ret | Ensures that the given JNDI bindings are present on the server.
If a binding doesn't exist on the server it will be created.
If it already exists its value will be changed.
jboss_config:
Dict with connection properties (see state description)
bindings:
Dict with bindings to set.
profile:
The profile name (domain mode only)
Example:
.. code-block:: yaml
jndi_entries_created:
jboss7.bindings_exist:
- bindings:
'java:global/sampleapp/environment': 'DEV'
'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties'
- jboss_config: {{ pillar['jboss'] }} | Below is the instruction that describes the task:
### Input:
Ensures that the given JNDI bindings are present on the server.
If a binding doesn't exist on the server it will be created.
If it already exists its value will be changed.
jboss_config:
Dict with connection properties (see state description)
bindings:
Dict with bindings to set.
profile:
The profile name (domain mode only)
Example:
.. code-block:: yaml
jndi_entries_created:
jboss7.bindings_exist:
- bindings:
'java:global/sampleapp/environment': 'DEV'
'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties'
- jboss_config: {{ pillar['jboss'] }}
### Response:
def bindings_exist(name, jboss_config, bindings, profile=None):
'''
Ensures that the given JNDI bindings are present on the server.
If a binding doesn't exist on the server it will be created.
If it already exists its value will be changed.
jboss_config:
Dict with connection properties (see state description)
bindings:
Dict with bindings to set.
profile:
The profile name (domain mode only)
Example:
.. code-block:: yaml
jndi_entries_created:
jboss7.bindings_exist:
- bindings:
'java:global/sampleapp/environment': 'DEV'
'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties'
- jboss_config: {{ pillar['jboss'] }}
'''
log.debug(" ======================== STATE: jboss7.bindings_exist (name: %s) (profile: %s) ", name, profile)
log.debug('bindings=%s', bindings)
ret = {'name': name,
'result': True,
'changes': {},
'comment': 'Bindings not changed.'}
has_changed = False
for key in bindings:
value = six.text_type(bindings[key])
query_result = __salt__['jboss7.read_simple_binding'](binding_name=key, jboss_config=jboss_config, profile=profile)
if query_result['success']:
current_value = query_result['result']['value']
if current_value != value:
update_result = __salt__['jboss7.update_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile)
if update_result['success']:
has_changed = True
__log_binding_change(ret['changes'], 'changed', key, value, current_value)
else:
raise CommandExecutionError(update_result['failure-description'])
else:
if query_result['err_code'] == 'JBAS014807': # ok, resource not exists:
create_result = __salt__['jboss7.create_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile)
if create_result['success']:
has_changed = True
__log_binding_change(ret['changes'], 'added', key, value)
else:
raise CommandExecutionError(create_result['failure-description'])
else:
raise CommandExecutionError(query_result['failure-description'])
if has_changed:
ret['comment'] = 'Bindings changed.'
return ret |
def run_all(logdir, verbose=False):
"""Run simulations on a reasonable set of parameters.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins
"""
run_box_to_gaussian(logdir, verbose=verbose)
run_sobel(logdir, verbose=verbose) | Run simulations on a reasonable set of parameters.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins | Below is the instruction that describes the task:
### Input:
Run simulations on a reasonable set of parameters.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins
### Response:
def run_all(logdir, verbose=False):
"""Run simulations on a reasonable set of parameters.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins
"""
run_box_to_gaussian(logdir, verbose=verbose)
run_sobel(logdir, verbose=verbose) |
def accept(self, *args):
'''Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, return None.
'''
token = self.peek()
if token is None:
return None
for arg in args:
if token.type == arg:
self.position += 1
return token
return None | Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, return None. | Below is the instruction that describes the task:
### Input:
Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, return None.
### Response:
def accept(self, *args):
'''Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, return None.
'''
token = self.peek()
if token is None:
return None
for arg in args:
if token.type == arg:
self.position += 1
return token
return None |
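A toy driver showing the peek/accept contract in isolation; the Token and MiniParser scaffolding below is invented purely for this example:
from collections import namedtuple

Token = namedtuple('Token', 'type value')

class MiniParser:
    """Toy token stream with the same peek/accept contract as the method above."""
    def __init__(self, tokens):
        self.tokens, self.position = tokens, 0
    def peek(self):
        return self.tokens[self.position] if self.position < len(self.tokens) else None
    def accept(self, *args):
        token = self.peek()
        if token is not None and token.type in args:
            self.position += 1
            return token
        return None

p = MiniParser([Token('integer64', 42), Token('comma', ',')])
print(p.accept('integer64', 'float'))   # Token(type='integer64', value=42) -- consumed
print(p.accept('integer64'))            # None -- next token is a comma, position unchanged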
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count | Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`. | Below is the instruction that describes the task:
### Input:
Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
### Response:
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count |
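For context, a bare sketch of the elasticsearch bulk helper that the method above ultimately calls, outside of any Celery/Kombu plumbing; the node URL and index name are placeholders:
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

client = Elasticsearch('http://localhost:9200')   # placeholder node address
actions = [
    {'_op_type': 'index', '_index': 'records', '_id': '1', '_source': {'title': 'hello'}},
    {'_op_type': 'index', '_index': 'records', '_id': '2', '_source': {'title': 'world'}},
]
# stats_only=True makes bulk() return (succeeded, failed) counts, which is what the
# method above hands back as `count`.
print(bulk(client, actions, stats_only=True))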
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
parser.allow_interspersed_args = ctx.allow_interspersed_args
parser.ignore_unknown_options = ctx.ignore_unknown_options
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser | Creates the underlying option parser for this command. | Below is the instruction that describes the task:
### Input:
Creates the underlying option parser for this command.
### Response:
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
parser.allow_interspersed_args = ctx.allow_interspersed_args
parser.ignore_unknown_options = ctx.ignore_unknown_options
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser |
def create_module_docs():
"""
Create documentation for modules.
"""
data = core_module_docstrings(format="rst")
# get screenshot data
screenshots_data = {}
samples = get_samples()
for sample in samples.keys():
module = sample.split("-")[0]
if module not in screenshots_data:
screenshots_data[module] = []
screenshots_data[module].append(sample)
out = []
# details
for module in sorted(data.keys()):
out.append("\n.. _module_%s:\n" % module) # reference for linking
out.append(
"\n{name}\n{underline}\n\n{screenshots}{details}\n".format(
name=module,
screenshots=screenshots(screenshots_data, module),
underline="-" * len(module),
details="".join(markdown_2_rst(data[module])).strip(),
)
)
# write include file
with open("../doc/modules-info.inc", "w") as f:
f.write("".join(out)) | Create documentation for modules. | Below is the the instruction that describes the task:
### Input:
Create documentation for modules.
### Response:
def create_module_docs():
"""
Create documentation for modules.
"""
data = core_module_docstrings(format="rst")
# get screenshot data
screenshots_data = {}
samples = get_samples()
for sample in samples.keys():
module = sample.split("-")[0]
if module not in screenshots_data:
screenshots_data[module] = []
screenshots_data[module].append(sample)
out = []
# details
for module in sorted(data.keys()):
out.append("\n.. _module_%s:\n" % module) # reference for linking
out.append(
"\n{name}\n{underline}\n\n{screenshots}{details}\n".format(
name=module,
screenshots=screenshots(screenshots_data, module),
underline="-" * len(module),
details="".join(markdown_2_rst(data[module])).strip(),
)
)
# write include file
with open("../doc/modules-info.inc", "w") as f:
f.write("".join(out)) |
def parallel_part(data, parallel):
"""parallel_part(data, parallel) -> part
Splits off samples from the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts, in which the data should be split into
**Returns:**
``part`` : [object]
The desired partition of the ``data``
"""
if parallel is None or "SGE_TASK_ID" not in os.environ:
return data
data_per_job = int(math.ceil(float(len(data)) / float(parallel)))
task_id = int(os.environ['SGE_TASK_ID'])
first = (task_id-1) * data_per_job
last = min(len(data), task_id * data_per_job)
return data[first:last] | parallel_part(data, parallel) -> part
Splits off samples from the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts, in which the data should be split into
**Returns:**
``part`` : [object]
The desired partition of the ``data`` | Below is the instruction that describes the task:
### Input:
parallel_part(data, parallel) -> part
Splits off samples from the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts, in which the data should be split into
**Returns:**
``part`` : [object]
The desired partition of the ``data``
### Response:
def parallel_part(data, parallel):
"""parallel_part(data, parallel) -> part
Splits off samples from the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts, in which the data should be split into
**Returns:**
``part`` : [object]
The desired partition of the ``data``
"""
if parallel is None or "SGE_TASK_ID" not in os.environ:
return data
data_per_job = int(math.ceil(float(len(data)) / float(parallel)))
task_id = int(os.environ['SGE_TASK_ID'])
first = (task_id-1) * data_per_job
last = min(len(data), task_id * data_per_job)
return data[first:last] |
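A worked run of the slicing arithmetic with SGE_TASK_ID set by hand (the grid engine normally sets it):
import math
import os

os.environ['SGE_TASK_ID'] = '2'     # pretend to be the second of three array jobs
data, parallel = list(range(10)), 3
data_per_job = int(math.ceil(float(len(data)) / float(parallel)))   # 4
task_id = int(os.environ['SGE_TASK_ID'])
first, last = (task_id - 1) * data_per_job, min(len(data), task_id * data_per_job)
print(data[first:last])             # [4, 5, 6, 7]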
def convert_to_xml(cls, degrees):
"""Convert signed angle float like -427.42 to int 60000 per degree.
Value is normalized to a positive value less than 360 degrees.
"""
if degrees < 0.0:
degrees %= -360
degrees += 360
elif degrees > 0.0:
degrees %= 360
return str(int(round(degrees * cls.DEGREE_INCREMENTS))) | Convert signed angle float like -427.42 to int 60000 per degree.
Value is normalized to a positive value less than 360 degrees. | Below is the instruction that describes the task:
### Input:
Convert signed angle float like -427.42 to int 60000 per degree.
Value is normalized to a positive value less than 360 degrees.
### Response:
def convert_to_xml(cls, degrees):
"""Convert signed angle float like -427.42 to int 60000 per degree.
Value is normalized to a positive value less than 360 degrees.
"""
if degrees < 0.0:
degrees %= -360
degrees += 360
elif degrees > 0.0:
degrees %= 360
return str(int(round(degrees * cls.DEGREE_INCREMENTS))) |
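The normalization worked by hand for the -427.42 value mentioned in the docstring, assuming DEGREE_INCREMENTS is 60000 (the per-degree unit the docstring names):
degrees = -427.42
degrees %= -360                      # -67.42, Python's % keeps the sign of the modulus
degrees += 360                       # 292.58, now inside [0, 360)
print(int(round(degrees * 60000)))   # 17554800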
def add_skip_ci_to_commit_msg(message: str) -> str:
"""
Adds a "[skip ci]" tag at the end of a (possibly multi-line) commit message
:param message: commit message
:type message: str
:return: edited commit message
:rtype: str
"""
first_line_index = message.find('\n')
if first_line_index == -1:
edited_message = message + ' [skip ci]'
else:
edited_message = message[:first_line_index] + ' [skip ci]' + message[first_line_index:]
LOGGER.debug('edited commit message: %s', edited_message)
return edited_message | Adds a "[skip ci]" tag at the end of a (possibly multi-line) commit message
:param message: commit message
:type message: str
:return: edited commit message
:rtype: str | Below is the instruction that describes the task:
### Input:
Adds a "[skip ci]" tag at the end of a (possibly multi-line) commit message
:param message: commit message
:type message: str
:return: edited commit message
:rtype: str
### Response:
def add_skip_ci_to_commit_msg(message: str) -> str:
"""
Adds a "[skip ci]" tag at the end of a (possibly multi-line) commit message
:param message: commit message
:type message: str
:return: edited commit message
:rtype: str
"""
first_line_index = message.find('\n')
if first_line_index == -1:
edited_message = message + ' [skip ci]'
else:
edited_message = message[:first_line_index] + ' [skip ci]' + message[first_line_index:]
LOGGER.debug('edited commit message: %s', edited_message)
return edited_message |
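A quick demonstration of where the tag lands for single-line and multi-line messages; the logging call is dropped so the sketch stays standalone:
def add_skip_ci_sketch(message):
    # Same logic as above, minus the LOGGER call.
    i = message.find('\n')
    return message + ' [skip ci]' if i == -1 else message[:i] + ' [skip ci]' + message[i:]

print(add_skip_ci_sketch('fix typo'))
# -> 'fix typo [skip ci]'
print(add_skip_ci_sketch('fix typo\n\nlonger body text'))
# -> 'fix typo [skip ci]\n\nlonger body text'  (the tag goes on the subject line)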
def mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
mtype = m.get_type()
if mtype in ['WAYPOINT_COUNT','MISSION_COUNT']:
if self.wp_op is None:
self.console.error("No waypoint load started")
else:
self.wploader.clear()
self.wploader.expected_count = m.count
self.console.writeln("Requesting %u waypoints t=%s now=%s" % (m.count,
time.asctime(time.localtime(m._timestamp)),
time.asctime()))
self.master.waypoint_request_send(0)
elif mtype in ['WAYPOINT', 'MISSION_ITEM'] and self.wp_op != None:
if m.seq > self.wploader.count():
self.console.writeln("Unexpected waypoint number %u - expected %u" % (m.seq, self.wploader.count()))
elif m.seq < self.wploader.count():
# a duplicate
pass
else:
self.wploader.add(m)
if m.seq+1 < self.wploader.expected_count:
self.master.waypoint_request_send(m.seq+1)
else:
if self.wp_op == 'list':
for i in range(self.wploader.count()):
w = self.wploader.wp(i)
print("%u %u %.10f %.10f %f p1=%.1f p2=%.1f p3=%.1f p4=%.1f cur=%u auto=%u" % (
w.command, w.frame, w.x, w.y, w.z,
w.param1, w.param2, w.param3, w.param4,
w.current, w.autocontinue))
if self.logdir != None:
waytxt = os.path.join(self.logdir, 'way.txt')
self.save_waypoints(waytxt)
print("Saved waypoints to %s" % waytxt)
elif self.wp_op == "save":
self.save_waypoints(self.wp_save_filename)
self.wp_op = None
elif mtype in ["WAYPOINT_REQUEST", "MISSION_REQUEST"]:
self.process_waypoint_request(m, self.master)
elif mtype in ["WAYPOINT_CURRENT", "MISSION_CURRENT"]:
if m.seq != self.last_waypoint:
self.last_waypoint = m.seq
if self.settings.wpupdates:
self.say("waypoint %u" % m.seq,priority='message') | handle an incoming mavlink packet | Below is the the instruction that describes the task:
### Input:
handle an incoming mavlink packet
### Response:
def mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
mtype = m.get_type()
if mtype in ['WAYPOINT_COUNT','MISSION_COUNT']:
if self.wp_op is None:
self.console.error("No waypoint load started")
else:
self.wploader.clear()
self.wploader.expected_count = m.count
self.console.writeln("Requesting %u waypoints t=%s now=%s" % (m.count,
time.asctime(time.localtime(m._timestamp)),
time.asctime()))
self.master.waypoint_request_send(0)
elif mtype in ['WAYPOINT', 'MISSION_ITEM'] and self.wp_op != None:
if m.seq > self.wploader.count():
self.console.writeln("Unexpected waypoint number %u - expected %u" % (m.seq, self.wploader.count()))
elif m.seq < self.wploader.count():
# a duplicate
pass
else:
self.wploader.add(m)
if m.seq+1 < self.wploader.expected_count:
self.master.waypoint_request_send(m.seq+1)
else:
if self.wp_op == 'list':
for i in range(self.wploader.count()):
w = self.wploader.wp(i)
print("%u %u %.10f %.10f %f p1=%.1f p2=%.1f p3=%.1f p4=%.1f cur=%u auto=%u" % (
w.command, w.frame, w.x, w.y, w.z,
w.param1, w.param2, w.param3, w.param4,
w.current, w.autocontinue))
if self.logdir != None:
waytxt = os.path.join(self.logdir, 'way.txt')
self.save_waypoints(waytxt)
print("Saved waypoints to %s" % waytxt)
elif self.wp_op == "save":
self.save_waypoints(self.wp_save_filename)
self.wp_op = None
elif mtype in ["WAYPOINT_REQUEST", "MISSION_REQUEST"]:
self.process_waypoint_request(m, self.master)
elif mtype in ["WAYPOINT_CURRENT", "MISSION_CURRENT"]:
if m.seq != self.last_waypoint:
self.last_waypoint = m.seq
if self.settings.wpupdates:
self.say("waypoint %u" % m.seq,priority='message') |
def bubble_at_T(zs, Psats, fugacities=None, gammas=None):
'''
>>> bubble_at_T([0.5, 0.5], [1400, 7000])
4200.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75])
3395.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75], fugacities=[.995, 0.98])
3452.440775305097
'''
if not fugacities:
fugacities = [1 for i in range(len(Psats))]
if not gammas:
gammas = [1 for i in range(len(Psats))]
if not none_and_length_check((zs, Psats, fugacities, gammas)):
raise Exception('Input dimensions are inconsistent or some input parameters are missing.')
P = sum(zs[i]*Psats[i]*gammas[i]/fugacities[i] for i in range(len(zs)))
return P | >>> bubble_at_T([0.5, 0.5], [1400, 7000])
4200.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75])
3395.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75], fugacities=[.995, 0.98])
3452.440775305097 | Below is the instruction that describes the task:
### Input:
>>> bubble_at_T([0.5, 0.5], [1400, 7000])
4200.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75])
3395.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75], fugacities=[.995, 0.98])
3452.440775305097
### Response:
def bubble_at_T(zs, Psats, fugacities=None, gammas=None):
'''
>>> bubble_at_T([0.5, 0.5], [1400, 7000])
4200.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75])
3395.0
>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75], fugacities=[.995, 0.98])
3452.440775305097
'''
if not fugacities:
fugacities = [1 for i in range(len(Psats))]
if not gammas:
gammas = [1 for i in range(len(Psats))]
if not none_and_length_check((zs, Psats, fugacities, gammas)):
raise Exception('Input dimensions are inconsistent or some input parameters are missing.')
P = sum(zs[i]*Psats[i]*gammas[i]/fugacities[i] for i in range(len(zs)))
return P |
def init_storage():
"""
Initialize the local dictionaries cache in ~/.crosswords/dicts.
"""
# http://stackoverflow.com/a/600612/735926
try:
os.makedirs(DICTS_PATH)
except OSError as ex:
if ex.errno != errno.EEXIST or not os.path.isdir(DICTS_PATH):
raise | Initialize the local dictionaries cache in ~/.crosswords/dicts. | Below is the instruction that describes the task:
### Input:
Initialize the local dictionaries cache in ~/.crosswords/dicts.
### Response:
def init_storage():
"""
Initialize the local dictionaries cache in ~/.crosswords/dicts.
"""
# http://stackoverflow.com/a/600612/735926
try:
os.makedirs(DICTS_PATH)
except OSError as ex:
if ex.errno != errno.EEXIST or not os.path.isdir(DICTS_PATH):
raise |
def set_elements_tail(parent_to_parse, element_path=None, tail_values=None):
"""
Assigns an array of tail values to each of the elements parsed from the parent. The
tail values are assigned in the same order they are provided.
If there are fewer values than elements, the remaining elements are skipped; but if
there are more, new elements will be inserted for each with the remaining tail values.
"""
if tail_values is None:
tail_values = []
return _set_elements_property(parent_to_parse, element_path, _ELEM_TAIL, tail_values) | Assigns an array of tail values to each of the elements parsed from the parent. The
tail values are assigned in the same order they are provided.
If there are fewer values than elements, the remaining elements are skipped; but if
there are more, new elements will be inserted for each with the remaining tail values. | Below is the instruction that describes the task:
### Input:
Assigns an array of tail values to each of the elements parsed from the parent. The
tail values are assigned in the same order they are provided.
If there are fewer values than elements, the remaining elements are skipped; but if
there are more, new elements will be inserted for each with the remaining tail values.
### Response:
def set_elements_tail(parent_to_parse, element_path=None, tail_values=None):
"""
Assigns an array of tail values to each of the elements parsed from the parent. The
tail values are assigned in the same order they are provided.
If there are fewer values than elements, the remaining elements are skipped; but if
there are more, new elements will be inserted for each with the remaining tail values.
"""
if tail_values is None:
tail_values = []
return _set_elements_property(parent_to_parse, element_path, _ELEM_TAIL, tail_values) |
def get_domain(url):
""" Returns domain name portion of a URL """
if 'http' not in url.lower():
url = 'http://{}'.format(url)
return urllib.parse.urlparse(url).hostname | Returns domain name portion of a URL | Below is the instruction that describes the task:
### Input:
Returns domain name portion of a URL
### Response:
def get_domain(url):
""" Returns domain name portion of a URL """
if 'http' not in url.lower():
url = 'http://{}'.format(url)
return urllib.parse.urlparse(url).hostname |
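Two quick calls showing the behaviour with and without an explicit scheme (get_domain prepends http:// when the URL has none):
print(get_domain('https://docs.python.org/3/library/urllib.parse.html'))  # docs.python.org
print(get_domain('example.com/some/path'))                                # example.com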
def dictlist_to_tsv(dictlist: List[Dict[str, Any]]) -> str:
"""
From a consistent list of dictionaries mapping fieldnames to values,
make a TSV file.
"""
if not dictlist:
return ""
fieldnames = dictlist[0].keys()
tsv = "\t".join([tsv_escape(f) for f in fieldnames]) + "\n"
for d in dictlist:
tsv += "\t".join([tsv_escape(v) for v in d.values()]) + "\n"
return tsv | From a consistent list of dictionaries mapping fieldnames to values,
make a TSV file. | Below is the instruction that describes the task:
### Input:
From a consistent list of dictionaries mapping fieldnames to values,
make a TSV file.
### Response:
def dictlist_to_tsv(dictlist: List[Dict[str, Any]]) -> str:
"""
From a consistent list of dictionaries mapping fieldnames to values,
make a TSV file.
"""
if not dictlist:
return ""
fieldnames = dictlist[0].keys()
tsv = "\t".join([tsv_escape(f) for f in fieldnames]) + "\n"
for d in dictlist:
tsv += "\t".join([tsv_escape(v) for v in d.values()]) + "\n"
return tsv |
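A minimal end-to-end sketch; tsv_escape is not shown in this record, so a simple stand-in that stringifies and escapes TSV-breaking characters is assumed here:
def tsv_escape_sketch(x):
    # Assumed behaviour of tsv_escape: stringify and neutralise tabs/newlines/backslashes.
    return str(x).replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n')

rows = [{'name': 'alice', 'age': 30}, {'name': 'bob', 'age': 41}]
tsv = '\t'.join(tsv_escape_sketch(f) for f in rows[0].keys()) + '\n'
for d in rows:
    tsv += '\t'.join(tsv_escape_sketch(v) for v in d.values()) + '\n'
print(tsv)   # header row 'name<TAB>age', then one line per dict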
def convert_surrogate_pair(match):
"""
Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
"""
pair = match.group(0)
codept = 0x10000 + (ord(pair[0]) - 0xd800) * 0x400 + (ord(pair[1]) - 0xdc00)
return chr(codept) | Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates | Below is the instruction that describes the task:
### Input:
Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
### Response:
def convert_surrogate_pair(match):
"""
Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
"""
pair = match.group(0)
codept = 0x10000 + (ord(pair[0]) - 0xd800) * 0x400 + (ord(pair[1]) - 0xdc00)
return chr(codept) |
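The formula worked by hand for the surrogate pair that encodes U+1F600; the regex match object is replaced by the raw two-character string for clarity:
pair = '\ud83d\ude00'    # high and low surrogate for U+1F600
codept = 0x10000 + (ord(pair[0]) - 0xd800) * 0x400 + (ord(pair[1]) - 0xdc00)
print(hex(codept))       # 0x1f600
print(chr(codept))       # the single grinning-face character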
def init(name, subnames, dest, skeleton, description, project_type, skip_core):
"""Creates a standalone, subprojects or submodules script sctrucure"""
dest = dest or CUR_DIR
skeleton = join(skeleton or SKEL_PATH, project_type)
project = join(dest, name)
script = join(project, name + '.py')
core = join(project, name)
if project_type == 'standalone':
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
ignore=False)
else:
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
exclude_dirs = ['submodule'] + (['project'] if skip_core else [])
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
exclude_dirs=exclude_dirs, ignore=True)
for subname in subnames:
renames = [
(join(project, 'submodule'), join(project, subname))
]
copy_skeleton(
subname, skeleton, project, renames=renames,
description=description, ignore=True,
exclude_dirs=['project'], exclude_files=['project.py'])
return 0, "\n{}\n".format(project) | Creates a standalone, subprojects or submodules script sctrucure | Below is the the instruction that describes the task:
### Input:
Creates a standalone, subprojects or submodules script structure
### Response:
def init(name, subnames, dest, skeleton, description, project_type, skip_core):
"""Creates a standalone, subprojects or submodules script sctrucure"""
dest = dest or CUR_DIR
skeleton = join(skeleton or SKEL_PATH, project_type)
project = join(dest, name)
script = join(project, name + '.py')
core = join(project, name)
if project_type == 'standalone':
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
ignore=False)
else:
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
exclude_dirs = ['submodule'] + (['project'] if skip_core else [])
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
exclude_dirs=exclude_dirs, ignore=True)
for subname in subnames:
renames = [
(join(project, 'submodule'), join(project, subname))
]
copy_skeleton(
subname, skeleton, project, renames=renames,
description=description, ignore=True,
exclude_dirs=['project'], exclude_files=['project.py'])
return 0, "\n{}\n".format(project) |
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(DictStruct, self).unpack(s)) | Parse bytes and return a namedtuple. | Below is the instruction that describes the task:
### Input:
Parse bytes and return a namedtuple.
### Response:
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(DictStruct, self).unpack(s)) |
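The enclosing DictStruct class is not part of this record; the pattern it implies (struct.Struct plus a namedtuple factory behind _create) might look like this sketch:
import struct
from collections import namedtuple

class DictStructSketch(struct.Struct):
    # Assumed shape: a format string plus field names, unpacking to namedtuples.
    def __init__(self, fmt, fields):
        super(DictStructSketch, self).__init__(fmt)
        self._create = namedtuple('Record', fields)._make
    def unpack(self, s):
        return self._create(super(DictStructSketch, self).unpack(s))

header = DictStructSketch('<HH', ['version', 'length'])
print(header.unpack(b'\x01\x00\x20\x00'))   # Record(version=1, length=32)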
def detach_session(self):
"""Allow the session to be discarded and don't get change notifications from it anymore"""
if self._session is not None:
self._session.unsubscribe(self)
self._session = None | Allow the session to be discarded and don't get change notifications from it anymore | Below is the instruction that describes the task:
### Input:
Allow the session to be discarded and don't get change notifications from it anymore
### Response:
def detach_session(self):
"""Allow the session to be discarded and don't get change notifications from it anymore"""
if self._session is not None:
self._session.unsubscribe(self)
self._session = None |
def wait_socks(sock_events, inmask=1, outmask=2, timeout=None):
"""wait on a combination of zeromq sockets, normal sockets, and fds
.. note:: this method can block
it will return once there is relevant activity on any of the
descriptors or sockets, or the timeout expires
:param sock_events:
two-tuples, the first item is either a zeromq socket, a socket, or a
file descriptor, and the second item is a mask made up of the inmask
and/or the outmask bitwise-ORd together
:type sock_events: list
:param inmask: the mask to use for readable events (default 1)
:type inmask: int
:param outmask: the mask to use for writable events (default 2)
:type outmask: int
:param timeout: the maximum time to block before raising an exception
:type timeout: int, float or None
:returns:
a list of two-tuples, each has one of the first elements from
``sock_events``, the second element is the event mask of the activity
that was detected (made up on inmask and/or outmask bitwise-ORd
together)
"""
results = []
for sock, mask in sock_events:
if isinstance(sock, zmq.backend.Socket):
mask = _check_events(sock, mask, inmask, outmask)
if mask:
results.append((sock, mask))
if results:
return results
fd_map = {}
fd_events = []
for sock, mask in sock_events:
if isinstance(sock, zmq.backend.Socket):
fd = sock.getsockopt(zmq.FD)
elif isinstance(sock, int):
fd = sock
else:
fd = sock.fileno()
fd_map[fd] = sock
fd_events.append((fd, mask))
while 1:
started = time.time()
active = descriptor.wait_fds(fd_events, inmask, outmask, timeout)
if not active:
# timed out
return []
results = []
for fd, mask in active:
sock = fd_map[fd]
if isinstance(sock, zmq.backend.Socket):
mask = _check_events(sock, mask, inmask, outmask)
if not mask:
continue
results.append((sock, mask))
if results:
return results
timeout -= time.time() - started | wait on a combination of zeromq sockets, normal sockets, and fds
.. note:: this method can block
it will return once there is relevant activity on any of the
descriptors or sockets, or the timeout expires
:param sock_events:
two-tuples, the first item is either a zeromq socket, a socket, or a
file descriptor, and the second item is a mask made up of the inmask
and/or the outmask bitwise-ORd together
:type sock_events: list
:param inmask: the mask to use for readable events (default 1)
:type inmask: int
:param outmask: the mask to use for writable events (default 2)
:type outmask: int
:param timeout: the maximum time to block before raising an exception
:type timeout: int, float or None
:returns:
a list of two-tuples, each has one of the first elements from
``sock_events``, the second element is the event mask of the activity
that was detected (made up on inmask and/or outmask bitwise-ORd
together) | Below is the instruction that describes the task:
### Input:
wait on a combination of zeromq sockets, normal sockets, and fds
.. note:: this method can block
it will return once there is relevant activity on any of the
descriptors or sockets, or the timeout expires
:param sock_events:
two-tuples, the first item is either a zeromq socket, a socket, or a
file descriptor, and the second item is a mask made up of the inmask
and/or the outmask bitwise-ORd together
:type sock_events: list
:param inmask: the mask to use for readable events (default 1)
:type inmask: int
:param outmask: the mask to use for writable events (default 2)
:type outmask: int
:param timeout: the maximum time to block before raising an exception
:type timeout: int, float or None
:returns:
a list of two-tuples, each has one of the first elements from
``sock_events``, the second element is the event mask of the activity
that was detected (made up on inmask and/or outmask bitwise-ORd
together)
### Response:
def wait_socks(sock_events, inmask=1, outmask=2, timeout=None):
"""wait on a combination of zeromq sockets, normal sockets, and fds
.. note:: this method can block
it will return once there is relevant activity on any of the
descriptors or sockets, or the timeout expires
:param sock_events:
two-tuples, the first item is either a zeromq socket, a socket, or a
file descriptor, and the second item is a mask made up of the inmask
and/or the outmask bitwise-ORd together
:type sock_events: list
:param inmask: the mask to use for readable events (default 1)
:type inmask: int
:param outmask: the mask to use for writable events (default 2)
:type outmask: int
:param timeout: the maximum time to block before raising an exception
:type timeout: int, float or None
:returns:
a list of two-tuples, each has one of the first elements from
``sock_events``, the second element is the event mask of the activity
that was detected (made up on inmask and/or outmask bitwise-ORd
together)
"""
results = []
for sock, mask in sock_events:
if isinstance(sock, zmq.backend.Socket):
mask = _check_events(sock, mask, inmask, outmask)
if mask:
results.append((sock, mask))
if results:
return results
fd_map = {}
fd_events = []
for sock, mask in sock_events:
if isinstance(sock, zmq.backend.Socket):
fd = sock.getsockopt(zmq.FD)
elif isinstance(sock, int):
fd = sock
else:
fd = sock.fileno()
fd_map[fd] = sock
fd_events.append((fd, mask))
while 1:
started = time.time()
active = descriptor.wait_fds(fd_events, inmask, outmask, timeout)
if not active:
# timed out
return []
results = []
for fd, mask in active:
sock = fd_map[fd]
if isinstance(sock, zmq.backend.Socket):
mask = _check_events(sock, mask, inmask, outmask)
if not mask:
continue
results.append((sock, mask))
if results:
return results
timeout -= time.time() - started |
def isNonNull(requestContext, seriesList):
"""
Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
"""
def transform(v):
if v is None:
return 0
else:
return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList | Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values. | Below is the instruction that describes the task:
### Input:
Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
### Response:
def isNonNull(requestContext, seriesList):
"""
Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
"""
def transform(v):
if v is None:
return 0
else:
return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList |
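The per-datapoint transform is just a None check; stripped of the Graphite series bookkeeping it reduces to:
points = [12.0, None, 0.0, None, 7.5]
print([0 if v is None else 1 for v in points])   # [1, 0, 1, 0, 1]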
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value)) | Format of the (string) object. | Below is the the instruction that describes the task:
### Input:
Format of the (string) object.
### Response:
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value)) |
def _cleanly_slice_encoded_string(encoded_string, length_limit):
"""
Takes a byte string (a UTF-8 encoded string) and splits it into two pieces such that the first slice is no
longer than argument `length_limit`, then returns a tuple containing the first slice and remainder of the
byte string, respectively. The first slice may actually be shorter than `length_limit`, because this ensures
that the string does not get split in the middle of a multi-byte character.
This works because the first byte in a multi-byte unicode character encodes how many bytes compose that
character, so we can determine empirically if we are splitting in the middle of the character and correct for
that.
You can read more about how this works here: https://en.wikipedia.org/wiki/UTF-8#Description
:param encoded_string: The encoded string to split in two
:param length_limit: The maximum length allowed for the first slice of the string
:return: A tuple of (slice, remaining)
"""
sliced, remaining = encoded_string[:length_limit], encoded_string[length_limit:]
try:
sliced.decode('utf-8')
except UnicodeDecodeError as e:
sliced, remaining = sliced[:e.start], sliced[e.start:] + remaining
return sliced, remaining | Takes a byte string (a UTF-8 encoded string) and splits it into two pieces such that the first slice is no
longer than argument `length_limit`, then returns a tuple containing the first slice and remainder of the
byte string, respectively. The first slice may actually be shorter than `length_limit`, because this ensures
that the string does not get split in the middle of a multi-byte character.
This works because the first byte in a multi-byte unicode character encodes how many bytes compose that
character, so we can determine empirically if we are splitting in the middle of the character and correct for
that.
You can read more about how this works here: https://en.wikipedia.org/wiki/UTF-8#Description
:param encoded_string: The encoded string to split in two
:param length_limit: The maximum length allowed for the first slice of the string
:return: A tuple of (slice, remaining) | Below is the instruction that describes the task:
### Input:
Takes a byte string (a UTF-8 encoded string) and splits it into two pieces such that the first slice is no
longer than argument `length_limit`, then returns a tuple containing the first slice and remainder of the
byte string, respectively. The first slice may actually be shorter than `length_limit`, because this ensures
that the string does not get split in the middle of a multi-byte character.
This works because the first byte in a multi-byte unicode character encodes how many bytes compose that
character, so we can determine empirically if we are splitting in the middle of the character and correct for
that.
You can read more about how this works here: https://en.wikipedia.org/wiki/UTF-8#Description
:param encoded_string: The encoded string to split in two
:param length_limit: The maximum length allowed for the first slice of the string
:return: A tuple of (slice, remaining)
### Response:
def _cleanly_slice_encoded_string(encoded_string, length_limit):
"""
Takes a byte string (a UTF-8 encoded string) and splits it into two pieces such that the first slice is no
longer than argument `length_limit`, then returns a tuple containing the first slice and remainder of the
byte string, respectively. The first slice may actually be shorter than `length_limit`, because this ensures
that the string does not get split in the middle of a multi-byte character.
This works because the first byte in a multi-byte unicode character encodes how many bytes compose that
character, so we can determine empirically if we are splitting in the middle of the character and correct for
that.
You can read more about how this works here: https://en.wikipedia.org/wiki/UTF-8#Description
:param encoded_string: The encoded string to split in two
:param length_limit: The maximum length allowed for the first slice of the string
:return: A tuple of (slice, remaining)
"""
sliced, remaining = encoded_string[:length_limit], encoded_string[length_limit:]
try:
sliced.decode('utf-8')
except UnicodeDecodeError as e:
sliced, remaining = sliced[:e.start], sliced[e.start:] + remaining
return sliced, remaining |
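A worked example where the limit falls inside a two-byte character ('é' encodes to 0xC3 0xA9), showing how the split point gets pulled back to the character boundary:
encoded = 'héllo'.encode('utf-8')     # b'h\xc3\xa9llo', six bytes
length_limit = 2
sliced, remaining = encoded[:length_limit], encoded[length_limit:]
try:
    sliced.decode('utf-8')
except UnicodeDecodeError as e:
    sliced, remaining = sliced[:e.start], sliced[e.start:] + remaining
print(sliced, remaining)              # b'h' b'\xc3\xa9llo'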
def parse_xml(data, handle_units):
"""Parse XML data returned by NCSS."""
root = ET.fromstring(data)
return squish(parse_xml_dataset(root, handle_units)) | Parse XML data returned by NCSS. | Below is the instruction that describes the task:
### Input:
Parse XML data returned by NCSS.
### Response:
def parse_xml(data, handle_units):
"""Parse XML data returned by NCSS."""
root = ET.fromstring(data)
return squish(parse_xml_dataset(root, handle_units)) |
def high_cli(repo_name, login, with_blog, as_list, role):
"""Extract mails from stargazers, collaborators and people involved with issues of given
repository.
"""
passw = getpass.getpass()
github = gh_login(login, passw)
repo = github.repository(login, repo_name)
role = [ROLES[k] for k in role]
users = fetch_logins(role, repo)
mails, blogs = contacts(github, users)
if 'issue' in role:
mails |= extract_mail(repo.issues(state='all'))
# Print results
sep = ', ' if as_list else '\n'
print(sep.join(mails))
if with_blog:
        print(sep.join(blogs)) | Extract mails from stargazers, collaborators and people involved with issues of the given
repository. | Below is the the instruction that describes the task:
### Input:
Extract mails from stargazers, collaborators and people involved with issues of the given
repository.
### Response:
def high_cli(repo_name, login, with_blog, as_list, role):
"""Extract mails from stargazers, collaborators and people involved with issues of given
repository.
"""
passw = getpass.getpass()
github = gh_login(login, passw)
repo = github.repository(login, repo_name)
role = [ROLES[k] for k in role]
users = fetch_logins(role, repo)
mails, blogs = contacts(github, users)
if 'issue' in role:
mails |= extract_mail(repo.issues(state='all'))
# Print results
sep = ', ' if as_list else '\n'
print(sep.join(mails))
if with_blog:
print(sep.join(blogs)) |
def get_eqsl_users(**kwargs):
"""Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt
"""
url = ""
eqsl = []
try:
url = kwargs['url']
except KeyError:
url = "http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt"
try:
result = requests.get(url)
except (ConnectionError, HTTPError, Timeout) as e:
raise IOError(e)
if result.status_code == requests.codes.ok:
eqsl = re.sub("^List.+UTC", "", result.text)
eqsl = eqsl.upper().split()
else:
raise IOError("HTTP Error: " + str(result.status_code))
return eqsl | Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt | Below is the the instruction that describes the task:
### Input:
Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt
### Response:
def get_eqsl_users(**kwargs):
"""Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt
"""
url = ""
eqsl = []
try:
url = kwargs['url']
except KeyError:
url = "http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt"
try:
result = requests.get(url)
except (ConnectionError, HTTPError, Timeout) as e:
raise IOError(e)
if result.status_code == requests.codes.ok:
eqsl = re.sub("^List.+UTC", "", result.text)
eqsl = eqsl.upper().split()
else:
raise IOError("HTTP Error: " + str(result.status_code))
return eqsl |
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = get_url()
context.configure(
url=url,
version_table="alembic_ziggurat_foundations_version",
transaction_per_migration=True,
)
with context.begin_transaction():
context.run_migrations() | Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output. | Below is the the instruction that describes the task:
### Input:
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
### Response:
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = get_url()
context.configure(
url=url,
version_table="alembic_ziggurat_foundations_version",
transaction_per_migration=True,
)
with context.begin_transaction():
context.run_migrations() |
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The image hash of the container
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'container': container,
'repo': repository,
'tag': tag,
'comment': message,
'author': author,
'changes': changes
}
u = self._url("/commit")
return self._result(
self._post_json(u, data=conf, params=params), json=True
) | Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The image hash of the container
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the the instruction that describes the task:
### Input:
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The image hash of the container
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The image hash of the container
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'container': container,
'repo': repository,
'tag': tag,
'comment': message,
'author': author,
'changes': changes
}
u = self._url("/commit")
return self._result(
self._post_json(u, data=conf, params=params), json=True
) |
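A hedged usage sketch of this low-level call through docker-py's APIClient; it assumes a local Docker daemon is running, and the container id and repository name below are placeholders rather than real objects:
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
container_id = 'abc123def456'            # placeholder container id
image = client.commit(
    container_id,
    repository='myorg/myapp',
    tag='snapshot',
    message='state after data load',
    author='Jane Doe <[email protected]>',
)
print(image['Id'])                       # id of the newly created image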
def get_qseq_dir(fc_dir):
"""Retrieve the qseq directory within Solexa flowcell output.
"""
machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
if os.path.exists(machine_bc):
return machine_bc
# otherwise assume we are in the qseq directory
# XXX What other cases can we end up with here?
else:
return fc_dir | Retrieve the qseq directory within Solexa flowcell output. | Below is the the instruction that describes the task:
### Input:
Retrieve the qseq directory within Solexa flowcell output.
### Response:
def get_qseq_dir(fc_dir):
"""Retrieve the qseq directory within Solexa flowcell output.
"""
machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
if os.path.exists(machine_bc):
return machine_bc
# otherwise assume we are in the qseq directory
# XXX What other cases can we end up with here?
else:
return fc_dir |
def get_rlzs_by_gsim(self, trt_or_grp_id, sm_id=None):
"""
:param trt_or_grp_id: a tectonic region type or a source group ID
:param sm_id: source model ordinal (or None)
:returns: a dictionary gsim -> rlzs
"""
if isinstance(trt_or_grp_id, (int, U16, U32)): # grp_id
trt = self.csm_info.trt_by_grp[trt_or_grp_id]
sm_id = self.csm_info.get_sm_by_grp()[trt_or_grp_id]
else: # assume TRT string
trt = trt_or_grp_id
acc = collections.defaultdict(list)
if sm_id is None: # full dictionary
for rlz, gsim_by_trt in zip(self.realizations, self.gsim_by_trt):
acc[gsim_by_trt[trt]].append(rlz.ordinal)
else: # dictionary for the selected source model
for rlz in self.rlzs_by_smodel[sm_id]:
gsim_by_trt = self.gsim_by_trt[rlz.ordinal]
try: # if there is a single TRT
[gsim] = gsim_by_trt.values()
except ValueError: # there is more than 1 TRT
gsim = gsim_by_trt[trt]
acc[gsim].append(rlz.ordinal)
return {gsim: numpy.array(acc[gsim], dtype=U16)
for gsim in sorted(acc)} | :param trt_or_grp_id: a tectonic region type or a source group ID
:param sm_id: source model ordinal (or None)
:returns: a dictionary gsim -> rlzs | Below is the the instruction that describes the task:
### Input:
:param trt_or_grp_id: a tectonic region type or a source group ID
:param sm_id: source model ordinal (or None)
:returns: a dictionary gsim -> rlzs
### Response:
def get_rlzs_by_gsim(self, trt_or_grp_id, sm_id=None):
"""
:param trt_or_grp_id: a tectonic region type or a source group ID
:param sm_id: source model ordinal (or None)
:returns: a dictionary gsim -> rlzs
"""
if isinstance(trt_or_grp_id, (int, U16, U32)): # grp_id
trt = self.csm_info.trt_by_grp[trt_or_grp_id]
sm_id = self.csm_info.get_sm_by_grp()[trt_or_grp_id]
else: # assume TRT string
trt = trt_or_grp_id
acc = collections.defaultdict(list)
if sm_id is None: # full dictionary
for rlz, gsim_by_trt in zip(self.realizations, self.gsim_by_trt):
acc[gsim_by_trt[trt]].append(rlz.ordinal)
else: # dictionary for the selected source model
for rlz in self.rlzs_by_smodel[sm_id]:
gsim_by_trt = self.gsim_by_trt[rlz.ordinal]
try: # if there is a single TRT
[gsim] = gsim_by_trt.values()
except ValueError: # there is more than 1 TRT
gsim = gsim_by_trt[trt]
acc[gsim].append(rlz.ordinal)
return {gsim: numpy.array(acc[gsim], dtype=U16)
for gsim in sorted(acc)} |
def itemsbyscore(self, min='-inf', max='+inf', start=None, num=None,
reverse=None):
""" Return a range of |(member, score)| pairs from the sorted set name
with scores between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
@max: #int minimum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
        @reverse: #bool indicating whether to sort the results in descending order
-> yields |(member, score)| #tuple pairs
"""
reverse = reverse if reverse is not None else self.reversed
for member in self.iterbyscore(
min, max, start, num, withscores=True, reverse=reverse):
yield member | Return a range of |(member, score)| pairs from the sorted set name
with scores between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
@max: #int minimum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@reverse: #bool indicating whether to sort the results in descending order
-> yields |(member, score)| #tuple pairs | Below is the the instruction that describes the task:
### Input:
Return a range of |(member, score)| pairs from the sorted set name
with scores between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
@max: #int minimum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@reverse: #bool indicating whether to sort the results in descending order
-> yields |(member, score)| #tuple pairs
### Response:
def itemsbyscore(self, min='-inf', max='+inf', start=None, num=None,
reverse=None):
""" Return a range of |(member, score)| pairs from the sorted set name
with scores between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
@max: #int minimum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
        @reverse: #bool indicating whether to sort the results in descending order
-> yields |(member, score)| #tuple pairs
"""
reverse = reverse if reverse is not None else self.reversed
for member in self.iterbyscore(
min, max, start, num, withscores=True, reverse=reverse):
yield member |
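The iterator above ultimately maps onto a ZRANGEBYSCORE (or ZREVRANGEBYSCORE) call; a rough standalone equivalent with plain redis-py 3.x, assuming a Redis server on localhost and a hypothetical key name:
import redis

r = redis.Redis()                                        # assumes localhost:6379
r.zadd('scores', {'alice': 10, 'bob': 25, 'carol': 40})
# (member, score) pairs with 10 <= score <= 30, ascending
for member, score in r.zrangebyscore('scores', 10, 30, withscores=True):
    print(member, score)                                 # b'alice' 10.0, then b'bob' 25.0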
def pstdev(data):
"""Calculates the population standard deviation."""
#: http://stackoverflow.com/a/27758326
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/n # the population variance
return pvar**0.5 | Calculates the population standard deviation. | Below is the the instruction that describes the task:
### Input:
Calculates the population standard deviation.
### Response:
def pstdev(data):
"""Calculates the population standard deviation."""
#: http://stackoverflow.com/a/27758326
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/n # the population variance
return pvar**0.5 |
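A worked example with the sum of squared deviations computed inline, since the _ss helper used above is not shown in this entry:
data = [2, 4, 4, 4, 5, 5, 7, 9]
n = len(data)
mean = sum(data) / n                       # 5.0
ss = sum((x - mean) ** 2 for x in data)    # 32.0
print((ss / n) ** 0.5)                     # 2.0, the population standard deviation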
def apply_poisson_noise(data, random_state=None):
"""
Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a positive value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied')
"""
data = np.asanyarray(data)
if np.any(data < 0):
raise ValueError('data must not contain any negative values')
prng = check_random_state(random_state)
return prng.poisson(data) | Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a positive value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied') | Below is the the instruction that describes the task:
### Input:
Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a positive value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied')
### Response:
def apply_poisson_noise(data, random_state=None):
"""
Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a positive value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied')
"""
data = np.asanyarray(data)
if np.any(data < 0):
raise ValueError('data must not contain any negative values')
prng = check_random_state(random_state)
return prng.poisson(data) |
def collect(self):
"""Yields metrics from the collectors in the registry."""
collectors = None
with self._lock:
collectors = copy.copy(self._collector_to_names)
for collector in collectors:
for metric in collector.collect():
yield metric | Yields metrics from the collectors in the registry. | Below is the the instruction that describes the task:
### Input:
Yields metrics from the collectors in the registry.
### Response:
def collect(self):
"""Yields metrics from the collectors in the registry."""
collectors = None
with self._lock:
collectors = copy.copy(self._collector_to_names)
for collector in collectors:
for metric in collector.collect():
yield metric |
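This matches the shape of prometheus_client's CollectorRegistry.collect; if that is indeed the source, a usage sketch with that library looks like the following (the metric name here is illustrative):
from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()
jobs = Counter('jobs_total', 'Jobs processed', registry=registry)
jobs.inc(3)
for metric in registry.collect():
    # each metric exposes its samples as (name, labels, value, ...) tuples
    print(metric.name, [(s.name, s.value) for s in metric.samples])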
def add_task_group(self, task_group, project):
"""AddTaskGroup.
[Preview API] Create a task group.
:param :class:`<TaskGroupCreateParameter> <azure.devops.v5_0.task_agent.models.TaskGroupCreateParameter>` task_group: Task group object to create.
:param str project: Project ID or project name
:rtype: :class:`<TaskGroup> <azure.devops.v5_0.task_agent.models.TaskGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(task_group, 'TaskGroupCreateParameter')
response = self._send(http_method='POST',
location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskGroup', response) | AddTaskGroup.
[Preview API] Create a task group.
:param :class:`<TaskGroupCreateParameter> <azure.devops.v5_0.task_agent.models.TaskGroupCreateParameter>` task_group: Task group object to create.
:param str project: Project ID or project name
:rtype: :class:`<TaskGroup> <azure.devops.v5_0.task_agent.models.TaskGroup>` | Below is the the instruction that describes the task:
### Input:
AddTaskGroup.
[Preview API] Create a task group.
:param :class:`<TaskGroupCreateParameter> <azure.devops.v5_0.task_agent.models.TaskGroupCreateParameter>` task_group: Task group object to create.
:param str project: Project ID or project name
:rtype: :class:`<TaskGroup> <azure.devops.v5_0.task_agent.models.TaskGroup>`
### Response:
def add_task_group(self, task_group, project):
"""AddTaskGroup.
[Preview API] Create a task group.
:param :class:`<TaskGroupCreateParameter> <azure.devops.v5_0.task_agent.models.TaskGroupCreateParameter>` task_group: Task group object to create.
:param str project: Project ID or project name
:rtype: :class:`<TaskGroup> <azure.devops.v5_0.task_agent.models.TaskGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(task_group, 'TaskGroupCreateParameter')
response = self._send(http_method='POST',
location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskGroup', response) |
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:] | Setup the screen buffer from the C++ code. | Below is the the instruction that describes the task:
### Input:
Setup the screen buffer from the C++ code.
### Response:
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:] |
def parse_timestr(timestr):
"""
Parse a string describing a point in time.
"""
timedelta_secs = parse_timedelta(timestr)
sync_start = datetime.now()
if timedelta_secs:
target = datetime.now() + timedelta(seconds=timedelta_secs)
elif timestr.isdigit():
target = datetime.now() + timedelta(seconds=int(timestr))
else:
try:
target = parse(timestr)
except:
# unfortunately, dateutil doesn't raise the best exceptions
raise ValueError("Unable to parse '{}'".format(timestr))
# When I do "termdown 10" (the two cases above), I want a
# countdown for the next 10 seconds. Okay. But when I do
# "termdown 23:52", I want a countdown that ends at that exact
# moment -- the countdown is related to real time. Thus, I want
# my frames to be drawn at full seconds, so I enforce
# microsecond=0.
sync_start = sync_start.replace(microsecond=0)
try:
# try to convert target to naive local timezone
target = target.astimezone(tz=tz.tzlocal()).replace(tzinfo=None)
except ValueError:
# parse() already returned a naive datetime, all is well
pass
return (sync_start, target) | Parse a string describing a point in time. | Below is the the instruction that describes the task:
### Input:
Parse a string describing a point in time.
### Response:
def parse_timestr(timestr):
"""
Parse a string describing a point in time.
"""
timedelta_secs = parse_timedelta(timestr)
sync_start = datetime.now()
if timedelta_secs:
target = datetime.now() + timedelta(seconds=timedelta_secs)
elif timestr.isdigit():
target = datetime.now() + timedelta(seconds=int(timestr))
else:
try:
target = parse(timestr)
except:
# unfortunately, dateutil doesn't raise the best exceptions
raise ValueError("Unable to parse '{}'".format(timestr))
# When I do "termdown 10" (the two cases above), I want a
# countdown for the next 10 seconds. Okay. But when I do
# "termdown 23:52", I want a countdown that ends at that exact
# moment -- the countdown is related to real time. Thus, I want
# my frames to be drawn at full seconds, so I enforce
# microsecond=0.
sync_start = sync_start.replace(microsecond=0)
try:
# try to convert target to naive local timezone
target = target.astimezone(tz=tz.tzlocal()).replace(tzinfo=None)
except ValueError:
# parse() already returned a naive datetime, all is well
pass
return (sync_start, target) |
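A hedged sketch of the absolute-time branch in isolation, using dateutil directly; parse_timedelta is a separate helper that is not part of this entry:
from dateutil import tz
from dateutil.parser import parse

target = parse('23:52')    # today at 23:52, naive
try:
    # convert to a naive datetime in the local timezone, as above
    target = target.astimezone(tz=tz.tzlocal()).replace(tzinfo=None)
except ValueError:
    pass                   # older interpreters refuse astimezone on naive datetimes
print(target)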
def memoize_method(method):
"""Simple caching decorator."""
cache = {}
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
"""Caching wrapper."""
key = (args, tuple(sorted(kwargs.items())))
if key in cache:
return cache[key]
else:
result = method(self, *args, **kwargs)
cache[key] = result
return result
return wrapper | Simple caching decorator. | Below is the the instruction that describes the task:
### Input:
Simple caching decorator.
### Response:
def memoize_method(method):
"""Simple caching decorator."""
cache = {}
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
"""Caching wrapper."""
key = (args, tuple(sorted(kwargs.items())))
if key in cache:
return cache[key]
else:
result = method(self, *args, **kwargs)
cache[key] = result
return result
return wrapper |
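A usage sketch, assuming the memoize_method decorator above is in scope; note that the cache dict is created once at decoration time, so it is shared by every instance and its key does not include self:
class Fetcher:
    @memoize_method
    def expensive(self, x):
        print('computing', x)
        return x * x

f = Fetcher()
print(f.expensive(3))           # prints "computing 3", then 9
print(f.expensive(3))           # cache hit: prints only 9
print(Fetcher().expensive(3))   # also a hit, because the key ignores self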
def updateObj(self,event):
"""Put this object in the search box"""
name=w.objList.get("active")
w.SearchVar.set(name)
w.ObjInfo.set(objInfoDict[name])
return | Put this object in the search box | Below is the the instruction that describes the task:
### Input:
Put this object in the search box
### Response:
def updateObj(self,event):
"""Put this object in the search box"""
name=w.objList.get("active")
w.SearchVar.set(name)
w.ObjInfo.set(objInfoDict[name])
return |
def get_pyof_obj_new_version(name, obj, new_version):
r"""Return a class attribute on a different pyof version.
This method receives the name of a class attribute, the class attribute
itself (object) and an openflow version.
The attribute will be evaluated and from it we will recover its class
and the module where the class was defined.
If the module is a "python-openflow version specific module" (starts
with "pyof.v0"), then we will get it's version and if it is different
from the 'new_version', then we will get the module on the
'new_version', look for the 'obj' class on the new module and return
an instance of the new version of the 'obj'.
Example:
>>> from pyof.foundation.base import MetaStruct as ms
>>> from pyof.v0x01.common.header import Header
>>> name = 'header'
>>> obj = Header()
>>> new_version = 'v0x04'
>>> header, obj2 = ms.get_pyof_obj_new_version(name, obj, new_version)
>>> header
'header'
>>> obj.version
UBInt8(1)
>>> obj2.version
UBInt8(4)
Args:
name (str): the name of the class attribute being handled.
obj (object): the class attribute itself
new_version (string): the pyof version in which you want the object
'obj'.
Return:
(str, obj): Tuple with class name and object instance.
A tuple in which the first item is the name of the class
attribute (the same that was passed), and the second item is a
instance of the passed class attribute. If the class attribute
is not a pyof versioned attribute, then the same passed object
is returned without any changes. Also, if the obj is a pyof
versioned attribute, but it is already on the right version
                (same as new_version), then the passed obj is returned.
"""
if new_version is None:
return (name, obj)
cls = obj.__class__
cls_name = cls.__name__
cls_mod = cls.__module__
#: If the module name does not starts with pyof.v0 then it is not a
#: 'pyof versioned' module (OpenFlow specification defined), so we do
#: not have anything to do with it.
new_mod = MetaStruct.replace_pyof_version(cls_mod, new_version)
if new_mod is not None:
# Loads the module
new_mod = importlib.import_module(new_mod)
#: Get the class from the loaded module
new_cls = getattr(new_mod, cls_name)
#: return the tuple with the attribute name and the instance
return (name, new_cls())
return (name, obj) | r"""Return a class attribute on a different pyof version.
This method receives the name of a class attribute, the class attribute
itself (object) and an openflow version.
The attribute will be evaluated and from it we will recover its class
and the module where the class was defined.
If the module is a "python-openflow version specific module" (starts
with "pyof.v0"), then we will get it's version and if it is different
from the 'new_version', then we will get the module on the
'new_version', look for the 'obj' class on the new module and return
an instance of the new version of the 'obj'.
Example:
>>> from pyof.foundation.base import MetaStruct as ms
>>> from pyof.v0x01.common.header import Header
>>> name = 'header'
>>> obj = Header()
>>> new_version = 'v0x04'
>>> header, obj2 = ms.get_pyof_obj_new_version(name, obj, new_version)
>>> header
'header'
>>> obj.version
UBInt8(1)
>>> obj2.version
UBInt8(4)
Args:
name (str): the name of the class attribute being handled.
obj (object): the class attribute itself
new_version (string): the pyof version in which you want the object
'obj'.
Return:
(str, obj): Tuple with class name and object instance.
A tuple in which the first item is the name of the class
attribute (the same that was passed), and the second item is a
instance of the passed class attribute. If the class attribute
is not a pyof versioned attribute, then the same passed object
is returned without any changes. Also, if the obj is a pyof
versioned attribute, but it is already on the right version
(same as new_version), then the passed obj is returned. | Below is the the instruction that describes the task:
### Input:
r"""Return a class attribute on a different pyof version.
This method receives the name of a class attribute, the class attribute
itself (object) and an openflow version.
The attribute will be evaluated and from it we will recover its class
and the module where the class was defined.
If the module is a "python-openflow version specific module" (starts
with "pyof.v0"), then we will get it's version and if it is different
from the 'new_version', then we will get the module on the
'new_version', look for the 'obj' class on the new module and return
an instance of the new version of the 'obj'.
Example:
>>> from pyof.foundation.base import MetaStruct as ms
>>> from pyof.v0x01.common.header import Header
>>> name = 'header'
>>> obj = Header()
>>> new_version = 'v0x04'
>>> header, obj2 = ms.get_pyof_obj_new_version(name, obj, new_version)
>>> header
'header'
>>> obj.version
UBInt8(1)
>>> obj2.version
UBInt8(4)
Args:
name (str): the name of the class attribute being handled.
obj (object): the class attribute itself
new_version (string): the pyof version in which you want the object
'obj'.
Return:
(str, obj): Tuple with class name and object instance.
A tuple in which the first item is the name of the class
attribute (the same that was passed), and the second item is a
instance of the passed class attribute. If the class attribute
is not a pyof versioned attribute, then the same passed object
is returned without any changes. Also, if the obj is a pyof
versioned attribute, but it is already on the right version
(same as new_version), then the passed obj is returned.
### Response:
def get_pyof_obj_new_version(name, obj, new_version):
r"""Return a class attribute on a different pyof version.
This method receives the name of a class attribute, the class attribute
itself (object) and an openflow version.
The attribute will be evaluated and from it we will recover its class
and the module where the class was defined.
If the module is a "python-openflow version specific module" (starts
with "pyof.v0"), then we will get it's version and if it is different
from the 'new_version', then we will get the module on the
'new_version', look for the 'obj' class on the new module and return
an instance of the new version of the 'obj'.
Example:
>>> from pyof.foundation.base import MetaStruct as ms
>>> from pyof.v0x01.common.header import Header
>>> name = 'header'
>>> obj = Header()
>>> new_version = 'v0x04'
>>> header, obj2 = ms.get_pyof_obj_new_version(name, obj, new_version)
>>> header
'header'
>>> obj.version
UBInt8(1)
>>> obj2.version
UBInt8(4)
Args:
name (str): the name of the class attribute being handled.
obj (object): the class attribute itself
new_version (string): the pyof version in which you want the object
'obj'.
Return:
(str, obj): Tuple with class name and object instance.
A tuple in which the first item is the name of the class
attribute (the same that was passed), and the second item is a
instance of the passed class attribute. If the class attribute
is not a pyof versioned attribute, then the same passed object
is returned without any changes. Also, if the obj is a pyof
versioned attribute, but it is already on the right version
                (same as new_version), then the passed obj is returned.
"""
if new_version is None:
return (name, obj)
cls = obj.__class__
cls_name = cls.__name__
cls_mod = cls.__module__
#: If the module name does not starts with pyof.v0 then it is not a
#: 'pyof versioned' module (OpenFlow specification defined), so we do
#: not have anything to do with it.
new_mod = MetaStruct.replace_pyof_version(cls_mod, new_version)
if new_mod is not None:
# Loads the module
new_mod = importlib.import_module(new_mod)
#: Get the class from the loaded module
new_cls = getattr(new_mod, cls_name)
#: return the tuple with the attribute name and the instance
return (name, new_cls())
return (name, obj) |
def astra_projection_geometry(geometry):
"""Create an ASTRA projection geometry from an ODL geometry object.
As of ASTRA version 1.7, the length values are not required any more to be
rescaled for 3D geometries and non-unit (but isotropic) voxel sizes.
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
proj_geom : dict
Dictionary defining the ASTRA projection geometry.
"""
if not isinstance(geometry, Geometry):
raise TypeError('`geometry` {!r} is not a `Geometry` instance'
''.format(geometry))
if 'astra' in geometry.implementation_cache:
# Shortcut, reuse already computed value.
return geometry.implementation_cache['astra']
if not geometry.det_partition.is_uniform:
raise ValueError('non-uniform detector sampling is not supported')
if (isinstance(geometry, ParallelBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 2):
# TODO: change to parallel_vec when available
det_width = geometry.det_partition.cell_sides[0]
det_count = geometry.detector.size
# Instead of rotating the data by 90 degrees counter-clockwise,
# we subtract pi/2 from the geometry angles, thereby rotating the
# geometry by 90 degrees clockwise
angles = geometry.angles - np.pi / 2
proj_geom = astra.create_proj_geom('parallel', det_width, det_count,
angles)
elif (isinstance(geometry, DivergentBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 2):
det_count = geometry.detector.size
vec = astra_conebeam_2d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('fanflat_vec', det_count, vec)
elif (isinstance(geometry, ParallelBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 3):
# Swap detector axes (see astra_*_3d_to_vec)
det_row_count = geometry.det_partition.shape[0]
det_col_count = geometry.det_partition.shape[1]
vec = astra_parallel_3d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('parallel3d_vec', det_row_count,
det_col_count, vec)
elif (isinstance(geometry, DivergentBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 3):
# Swap detector axes (see astra_*_3d_to_vec)
det_row_count = geometry.det_partition.shape[0]
det_col_count = geometry.det_partition.shape[1]
vec = astra_conebeam_3d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('cone_vec', det_row_count,
det_col_count, vec)
else:
raise NotImplementedError('unknown ASTRA geometry type {!r}'
''.format(geometry))
if 'astra' not in geometry.implementation_cache:
# Save computed value for later
geometry.implementation_cache['astra'] = proj_geom
return proj_geom | Create an ASTRA projection geometry from an ODL geometry object.
As of ASTRA version 1.7, the length values are not required any more to be
rescaled for 3D geometries and non-unit (but isotropic) voxel sizes.
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
proj_geom : dict
Dictionary defining the ASTRA projection geometry. | Below is the the instruction that describes the task:
### Input:
Create an ASTRA projection geometry from an ODL geometry object.
As of ASTRA version 1.7, the length values are not required any more to be
rescaled for 3D geometries and non-unit (but isotropic) voxel sizes.
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
proj_geom : dict
Dictionary defining the ASTRA projection geometry.
### Response:
def astra_projection_geometry(geometry):
"""Create an ASTRA projection geometry from an ODL geometry object.
As of ASTRA version 1.7, the length values are not required any more to be
rescaled for 3D geometries and non-unit (but isotropic) voxel sizes.
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
proj_geom : dict
Dictionary defining the ASTRA projection geometry.
"""
if not isinstance(geometry, Geometry):
raise TypeError('`geometry` {!r} is not a `Geometry` instance'
''.format(geometry))
if 'astra' in geometry.implementation_cache:
# Shortcut, reuse already computed value.
return geometry.implementation_cache['astra']
if not geometry.det_partition.is_uniform:
raise ValueError('non-uniform detector sampling is not supported')
if (isinstance(geometry, ParallelBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 2):
# TODO: change to parallel_vec when available
det_width = geometry.det_partition.cell_sides[0]
det_count = geometry.detector.size
# Instead of rotating the data by 90 degrees counter-clockwise,
# we subtract pi/2 from the geometry angles, thereby rotating the
# geometry by 90 degrees clockwise
angles = geometry.angles - np.pi / 2
proj_geom = astra.create_proj_geom('parallel', det_width, det_count,
angles)
elif (isinstance(geometry, DivergentBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 2):
det_count = geometry.detector.size
vec = astra_conebeam_2d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('fanflat_vec', det_count, vec)
elif (isinstance(geometry, ParallelBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 3):
# Swap detector axes (see astra_*_3d_to_vec)
det_row_count = geometry.det_partition.shape[0]
det_col_count = geometry.det_partition.shape[1]
vec = astra_parallel_3d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('parallel3d_vec', det_row_count,
det_col_count, vec)
elif (isinstance(geometry, DivergentBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 3):
# Swap detector axes (see astra_*_3d_to_vec)
det_row_count = geometry.det_partition.shape[0]
det_col_count = geometry.det_partition.shape[1]
vec = astra_conebeam_3d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('cone_vec', det_row_count,
det_col_count, vec)
else:
raise NotImplementedError('unknown ASTRA geometry type {!r}'
''.format(geometry))
if 'astra' not in geometry.implementation_cache:
# Save computed value for later
geometry.implementation_cache['astra'] = proj_geom
return proj_geom |
def feature_path_unset(self):
"""Copy features to memory and remove the association of the feature file."""
if not self.feature_file:
raise IOError('No feature file to unset')
with open(self.feature_path) as handle:
feats = list(GFF.parse(handle))
if len(feats) > 1:
log.warning('Too many sequences in GFF')
else:
tmp = feats[0].features
self.feature_dir = None
self.feature_file = None
self.features = tmp | Copy features to memory and remove the association of the feature file. | Below is the the instruction that describes the task:
### Input:
Copy features to memory and remove the association of the feature file.
### Response:
def feature_path_unset(self):
"""Copy features to memory and remove the association of the feature file."""
if not self.feature_file:
raise IOError('No feature file to unset')
with open(self.feature_path) as handle:
feats = list(GFF.parse(handle))
if len(feats) > 1:
log.warning('Too many sequences in GFF')
else:
tmp = feats[0].features
self.feature_dir = None
self.feature_file = None
self.features = tmp |
def p_duration_number_duration_unit(self, p):
'duration : NUMBER DURATION_UNIT'
logger.debug('duration = number %s, duration unit %s', p[1], p[2])
p[0] = Duration.from_quantity_unit(p[1], p[2]) | duration : NUMBER DURATION_UNIT | Below is the the instruction that describes the task:
### Input:
duration : NUMBER DURATION_UNIT
### Response:
def p_duration_number_duration_unit(self, p):
'duration : NUMBER DURATION_UNIT'
logger.debug('duration = number %s, duration unit %s', p[1], p[2])
p[0] = Duration.from_quantity_unit(p[1], p[2]) |
def parse(value):
"""Parse the string date.
This supports the subset of ISO8601 used by xsd:time, but is lenient
with what is accepted, handling most reasonable syntax.
@param value: A time string.
@type value: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
match_result = RE_TIME.match(value)
if match_result is None:
raise ValueError('date data has invalid format "%s"' % (value, ))
date = time_from_match(match_result)
tzinfo = tzinfo_from_match(match_result)
value = date.replace(tzinfo=tzinfo)
return value | Parse the string date.
This supports the subset of ISO8601 used by xsd:time, but is lenient
with what is accepted, handling most reasonable syntax.
@param value: A time string.
@type value: str
@return: A time object.
@rtype: B{datetime}.I{time} | Below is the the instruction that describes the task:
### Input:
Parse the string date.
This supports the subset of ISO8601 used by xsd:time, but is lenient
with what is accepted, handling most reasonable syntax.
@param value: A time string.
@type value: str
@return: A time object.
@rtype: B{datetime}.I{time}
### Response:
def parse(value):
"""Parse the string date.
This supports the subset of ISO8601 used by xsd:time, but is lenient
with what is accepted, handling most reasonable syntax.
@param value: A time string.
@type value: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
match_result = RE_TIME.match(value)
if match_result is None:
raise ValueError('date data has invalid format "%s"' % (value, ))
date = time_from_match(match_result)
tzinfo = tzinfo_from_match(match_result)
value = date.replace(tzinfo=tzinfo)
return value |
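For simple, well-formed cases a rough modern stand-in is datetime.time.fromisoformat (Python 3.7+), although it is stricter than the lenient regex-based parser above:
from datetime import time

print(time.fromisoformat('17:45:30.125'))    # 17:45:30.125000
print(time.fromisoformat('17:45:30+02:00'))  # 17:45:30+02:00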
def update_status_with_media(self, **params): # pragma: no cover
"""Updates the authenticating user's current status and attaches media
for upload. In other words, it creates a Tweet with a picture attached.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update_with_media
"""
warnings.warn(
'This method is deprecated. You should use Twython.upload_media instead.',
TwythonDeprecationWarning,
stacklevel=2
)
return self.post('statuses/update_with_media', params=params) | Updates the authenticating user's current status and attaches media
for upload. In other words, it creates a Tweet with a picture attached.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update_with_media | Below is the the instruction that describes the task:
### Input:
Updates the authenticating user's current status and attaches media
for upload. In other words, it creates a Tweet with a picture attached.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update_with_media
### Response:
def update_status_with_media(self, **params): # pragma: no cover
"""Updates the authenticating user's current status and attaches media
for upload. In other words, it creates a Tweet with a picture attached.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update_with_media
"""
warnings.warn(
'This method is deprecated. You should use Twython.upload_media instead.',
TwythonDeprecationWarning,
stacklevel=2
)
return self.post('statuses/update_with_media', params=params) |
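A hedged sketch of the replacement flow the deprecation warning points to, upload_media followed by update_status; the credentials and file name are placeholders and a real Twitter application is required for this to work:
from twython import Twython

APP_KEY = APP_SECRET = OAUTH_TOKEN = OAUTH_TOKEN_SECRET = '...'  # placeholder credentials
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
with open('picture.png', 'rb') as photo:                         # placeholder image file
    media = twitter.upload_media(media=photo)
twitter.update_status(status='Hello with a picture',
                      media_ids=[media['media_id']])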
def journals(self):
"""
Retrieve journals attribute for this very Issue
"""
try:
target = self._item_path
json_data = self._redmine.get(target % str(self.id),
parms={'include': 'journals'})
data = self._redmine.unwrap_json(None, json_data)
journals = [Journal(redmine=self._redmine,
data=journal,
type='issue_journal')
for journal in data['issue']['journals']]
return journals
except Exception:
return [] | Retrieve journals attribute for this very Issue | Below is the the instruction that describes the task:
### Input:
Retrieve journals attribute for this very Issue
### Response:
def journals(self):
"""
Retrieve journals attribute for this very Issue
"""
try:
target = self._item_path
json_data = self._redmine.get(target % str(self.id),
parms={'include': 'journals'})
data = self._redmine.unwrap_json(None, json_data)
journals = [Journal(redmine=self._redmine,
data=journal,
type='issue_journal')
for journal in data['issue']['journals']]
return journals
except Exception:
return [] |
def read_file(self, changeset_file):
"""Download the replication changeset file or read it directly from the
        filesystem (for testing purposes).
"""
if isfile(changeset_file):
self.filename = changeset_file
else:
self.path = mkdtemp()
self.filename = join(self.path, basename(changeset_file))
download(changeset_file, self.path)
self.xml = ET.fromstring(gzip.open(self.filename).read())
# delete folder created to download the file
if not isfile(changeset_file):
rmtree(self.path) | Download the replication changeset file or read it directly from the
filesystem (for testing purposes). | Below is the the instruction that describes the task:
### Input:
Download the replication changeset file or read it directly from the
filesystem (for testing purposes).
### Response:
def read_file(self, changeset_file):
"""Download the replication changeset file or read it directly from the
        filesystem (for testing purposes).
"""
if isfile(changeset_file):
self.filename = changeset_file
else:
self.path = mkdtemp()
self.filename = join(self.path, basename(changeset_file))
download(changeset_file, self.path)
self.xml = ET.fromstring(gzip.open(self.filename).read())
# delete folder created to download the file
if not isfile(changeset_file):
rmtree(self.path) |
def load(self, ymlfile=None):
"""Load and process the YAML file"""
if ymlfile is not None:
self.ymlfile = ymlfile
try:
# If yaml should be 'cleaned' of document references
if self._clean:
self.data = self.process(self.ymlfile)
else:
with open(self.ymlfile, 'rb') as stream:
for data in yaml.load_all(stream):
self.data.append(data)
self.loaded = True
except ScannerError, e:
msg = "YAML formattting error - '" + self.ymlfile + ": '" + str(e) + "'"
raise util.YAMLError(msg) | Load and process the YAML file | Below is the the instruction that describes the task:
### Input:
Load and process the YAML file
### Response:
def load(self, ymlfile=None):
"""Load and process the YAML file"""
if ymlfile is not None:
self.ymlfile = ymlfile
try:
# If yaml should be 'cleaned' of document references
if self._clean:
self.data = self.process(self.ymlfile)
else:
with open(self.ymlfile, 'rb') as stream:
for data in yaml.load_all(stream):
self.data.append(data)
self.loaded = True
except ScannerError, e:
msg = "YAML formattting error - '" + self.ymlfile + ": '" + str(e) + "'"
raise util.YAMLError(msg) |
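A minimal standalone illustration of the multi-document yaml.load_all call used above; an explicit SafeLoader is passed here because newer PyYAML releases warn (or refuse) when no loader is given:
import yaml

text = "a: 1\n---\nb: 2\n"
print(list(yaml.load_all(text, Loader=yaml.SafeLoader)))   # [{'a': 1}, {'b': 2}]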
def remove(self, option):
"""
Removes an option from a Config instance
IN: option (type: Option)
"""
if option.__class__ == Option:
if option in self.options:
del self.options[self.options.index(option)]
else:
raise OptionNotFoundError(option.name)
else:
raise TypeError("invalid type supplied") | Removes an option from a Config instance
IN: option (type: Option) | Below is the the instruction that describes the task:
### Input:
Removes an option from a Config instance
IN: option (type: Option)
### Response:
def remove(self, option):
"""
Removes an option from a Config instance
IN: option (type: Option)
"""
if option.__class__ == Option:
if option in self.options:
del self.options[self.options.index(option)]
else:
raise OptionNotFoundError(option.name)
else:
raise TypeError("invalid type supplied") |
def get(self, name):
"""
Get an asset from the ZIP archive.
Parameters
-------------
name : str
Name of the asset
Returns
-------------
data : bytes
Loaded data from asset
"""
# not much we can do with that
if name is None:
return
# if name isn't in archive try some similar values
if name not in self.archive:
if hasattr(name, 'decode'):
name = name.decode('utf-8')
# try with cleared whitespace, split paths
for option in [name,
name.lstrip('./'),
name.strip(),
name.split('/')[-1]]:
if option in self.archive:
name = option
break
# read file object from beginning
self.archive[name].seek(0)
# data is stored as a file object
data = self.archive[name].read()
return data | Get an asset from the ZIP archive.
Parameters
-------------
name : str
Name of the asset
Returns
-------------
data : bytes
Loaded data from asset | Below is the the instruction that describes the task:
### Input:
Get an asset from the ZIP archive.
Parameters
-------------
name : str
Name of the asset
Returns
-------------
data : bytes
Loaded data from asset
### Response:
def get(self, name):
"""
Get an asset from the ZIP archive.
Parameters
-------------
name : str
Name of the asset
Returns
-------------
data : bytes
Loaded data from asset
"""
# not much we can do with that
if name is None:
return
# if name isn't in archive try some similar values
if name not in self.archive:
if hasattr(name, 'decode'):
name = name.decode('utf-8')
# try with cleared whitespace, split paths
for option in [name,
name.lstrip('./'),
name.strip(),
name.split('/')[-1]]:
if option in self.archive:
name = option
break
# read file object from beginning
self.archive[name].seek(0)
# data is stored as a file object
data = self.archive[name].read()
return data |
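A standalone sketch of the same idea outside the class: map archive member names to file-like objects, then fall back through progressively cleaned names; the archive path and asset name below are hypothetical:
import io
import zipfile

with zipfile.ZipFile('model.zip') as zf:                 # hypothetical archive
    archive = {n: io.BytesIO(zf.read(n)) for n in zf.namelist()}

name = './textures/wood.png'                             # hypothetical asset name
if name not in archive:
    for option in (name, name.lstrip('./'), name.strip(), name.split('/')[-1]):
        if option in archive:
            name = option
            break
archive[name].seek(0)
data = archive[name].read()
print(len(data))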
def line_items(self):
"""Apply a datetime filter against the contributors's line item queryset."""
if self._line_items is None:
self._line_items = self.contributor.line_items.filter(
payment_date__range=(self.start, self.end)
)
        return self._line_items | Apply a datetime filter against the contributor's line item queryset. | Below is the the instruction that describes the task:
### Input:
Apply a datetime filter against the contributor's line item queryset.
### Response:
def line_items(self):
"""Apply a datetime filter against the contributors's line item queryset."""
if self._line_items is None:
self._line_items = self.contributor.line_items.filter(
payment_date__range=(self.start, self.end)
)
return self._line_items |
def update_qos_aggregated_configuration(self, qos_configuration, timeout=-1):
"""
Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
"""
uri = "{}{}".format(self.data["uri"], self.QOS_AGGREGATED_CONFIGURATION)
return self._helper.update(qos_configuration, uri=uri, timeout=timeout) | Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect. | Below is the the instruction that describes the task:
### Input:
Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
### Response:
def update_qos_aggregated_configuration(self, qos_configuration, timeout=-1):
"""
Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
"""
uri = "{}{}".format(self.data["uri"], self.QOS_AGGREGATED_CONFIGURATION)
return self._helper.update(qos_configuration, uri=uri, timeout=timeout) |
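A hypothetical call site for the method above; logical_interconnect stands in for an already-retrieved OneView logical-interconnect resource object, and the payload keys are illustrative rather than the complete QoS schema.

# 'logical_interconnect' is assumed to exist and expose the method defined above;
# the payload is a minimal, illustrative example, not the full OneView schema
qos_configuration = {
    "type": "qos-aggregated-configuration",
    "activeQosConfig": {
        "type": "QosConfiguration",
        "configType": "Passthrough",
    },
}
updated = logical_interconnect.update_qos_aggregated_configuration(
    qos_configuration, timeout=600
)
print(updated.get("category"))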
def commit(self):
"""
As in DBAPI2.0:
Commit any pending transaction to the database. Note that
if the database supports an auto-commit feature, this must
be initially off. An interface method may be provided to
turn it back on.
Database modules that do not support transactions should
implement this method with void functionality.
"""
try:
self.__dbapi2_conn.commit()
except Exception, e:
# try to reconnect
self.reconnect(None)
self.__dbapi2_conn.commit() | As in DBAPI2.0:
Commit any pending transaction to the database. Note that
if the database supports an auto-commit feature, this must
be initially off. An interface method may be provided to
turn it back on.
Database modules that do not support transactions should
implement this method with void functionality. | Below is the the instruction that describes the task:
### Input:
As in DBAPI2.0:
Commit any pending transaction to the database. Note that
if the database supports an auto-commit feature, this must
be initially off. An interface method may be provided to
turn it back on.
Database modules that do not support transactions should
implement this method with void functionality.
### Response:
def commit(self):
"""
As in DBAPI2.0:
Commit any pending transaction to the database. Note that
if the database supports an auto-commit feature, this must
be initially off. An interface method may be provided to
turn it back on.
Database modules that do not support transactions should
implement this method with void functionality.
"""
try:
self.__dbapi2_conn.commit()
except Exception, e:
# try to reconnect
self.reconnect(None)
self.__dbapi2_conn.commit() |
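The same commit-with-reconnect pattern as a small Python 3 sketch against the standard-library sqlite3 driver, so it runs standalone; the reconnect step is simplified to re-opening the connection, and the class name is an assumption.

import sqlite3

class ReconnectingConnection:
    """Wrap a DB-API connection and retry commit() once after reconnecting."""

    def __init__(self, dsn):
        self._dsn = dsn
        self._conn = sqlite3.connect(dsn)

    def reconnect(self):
        # simplified stand-in for the adapter's reconnect() above
        self._conn = sqlite3.connect(self._dsn)

    def commit(self):
        try:
            self._conn.commit()
        except Exception:
            # reconnect, then retry the commit once
            self.reconnect()
            self._conn.commit()

conn = ReconnectingConnection(":memory:")
conn._conn.execute("CREATE TABLE t (x INTEGER)")
conn.commit()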
def long_description():
"""Generate .rst document for PyPi."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--doc', dest="doc",
action="store_true", default=False)
args, sys.argv = parser.parse_known_args(sys.argv)
if args.doc:
import doc2md, pypandoc
md = doc2md.doc2md(doc2md.__doc__, "doc2md", toc=False)
long_description = pypandoc.convert(md, 'rst', format='md')
else:
return None | Generate .rst document for PyPi. | Below is the the instruction that describes the task:
### Input:
Generate .rst document for PyPi.
### Response:
def long_description():
"""Generate .rst document for PyPi."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--doc', dest="doc",
action="store_true", default=False)
args, sys.argv = parser.parse_known_args(sys.argv)
if args.doc:
import doc2md, pypandoc
md = doc2md.doc2md(doc2md.__doc__, "doc2md", toc=False)
long_description = pypandoc.convert(md, 'rst', format='md')
else:
return None |
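The useful trick here is argparse.parse_known_args, which strips the custom --doc flag before setuptools sees sys.argv; a standalone sketch of just that part, with the doc2md/pypandoc conversion omitted since those packages may not be available.

import argparse
import sys

def wants_doc_build(argv):
    # consume --doc if present and hand everything else back untouched
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("--doc", action="store_true", default=False)
    args, remaining = parser.parse_known_args(argv)
    return args.doc, remaining

build_doc, sys.argv = wants_doc_build(sys.argv)
print(build_doc, sys.argv)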
def run_flag_maf_zero(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step11 (flag MAF zero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module.
The required file type for this module is ``bfile``, hence the need to use
    the :py:func:`check_input_files` to check if the input file type is
    the correct one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need bfile
required_type = "bfile"
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "flag_maf_0")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
flag_maf_zero.main(options)
except flag_maf_zero.ProgramError as e:
msg = "flag_maf_zero: {}".format(e)
raise ProgramError(msg)
# Reading the file to compute the number of flagged markers
nb_flagged = None
flagged_fn = script_prefix + ".list"
with open(flagged_fn, "r") as i_file:
nb_flagged = len(i_file.read().splitlines())
# We write a LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
flag_maf_zero.pretty_name
)
safe_fn = latex_template.sanitize_tex(os.path.basename(flagged_fn))
text = (
"After computing minor allele frequencies (MAF) of all "
"markers using Plink, a total of {:,d} marker{} had a MAF "
"of zero and were flagged ({}).".format(
nb_flagged,
"s" if nb_flagged - 1 > 1 else "",
"see file " + latex_template.texttt(safe_fn) +
" for more information"
)
)
print >>o_file, latex_template.wrap_lines(text)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# Writing the summary results
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "# {}".format(script_prefix)
print >>o_file, ("Number of markers flagged for MAF of 0\t"
"{:,d}".format(nb_flagged))
print >>o_file, "---"
    # We know this step doesn't produce a new data set, so we return the old
# prefix and the old in_type
return _StepResult(
next_file=in_prefix,
next_file_type=required_type,
latex_summary=latex_file,
description=flag_maf_zero.desc,
long_description=flag_maf_zero.long_desc,
graph_path=None,
) | Runs step11 (flag MAF zero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module.
The required file type for this module is ``bfile``, hence the need to use
    the :py:func:`check_input_files` to check if the input file type is
    the correct one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type. | Below is the the instruction that describes the task:
### Input:
Runs step11 (flag MAF zero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module.
The required file type for this module is ``bfile``, hence the need to use
the :py:func:`check_input_files` to check if the input file type is
the correct one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type.
### Response:
def run_flag_maf_zero(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step11 (flag MAF zero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module.
The required file type for this module is ``bfile``, hence the need to use
    the :py:func:`check_input_files` to check if the input file type is
    the correct one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need bfile
required_type = "bfile"
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "flag_maf_0")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
flag_maf_zero.main(options)
except flag_maf_zero.ProgramError as e:
msg = "flag_maf_zero: {}".format(e)
raise ProgramError(msg)
# Reading the file to compute the number of flagged markers
nb_flagged = None
flagged_fn = script_prefix + ".list"
with open(flagged_fn, "r") as i_file:
nb_flagged = len(i_file.read().splitlines())
# We write a LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
flag_maf_zero.pretty_name
)
safe_fn = latex_template.sanitize_tex(os.path.basename(flagged_fn))
text = (
"After computing minor allele frequencies (MAF) of all "
"markers using Plink, a total of {:,d} marker{} had a MAF "
"of zero and were flagged ({}).".format(
nb_flagged,
"s" if nb_flagged - 1 > 1 else "",
"see file " + latex_template.texttt(safe_fn) +
" for more information"
)
)
print >>o_file, latex_template.wrap_lines(text)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# Writing the summary results
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "# {}".format(script_prefix)
print >>o_file, ("Number of markers flagged for MAF of 0\t"
"{:,d}".format(nb_flagged))
print >>o_file, "---"
    # We know this step doesn't produce a new data set, so we return the old
# prefix and the old in_type
return _StepResult(
next_file=in_prefix,
next_file_type=required_type,
latex_summary=latex_file,
description=flag_maf_zero.desc,
long_description=flag_maf_zero.long_desc,
graph_path=None,
) |
def preprocess(string):
"""
Preprocesses a string, by replacing ${VARNAME} with
os.environ['VARNAME']
Parameters
----------
string: the str object to preprocess
Returns
-------
the preprocessed string
"""
split = string.split('${')
rval = [split[0]]
for candidate in split[1:]:
subsplit = candidate.split('}')
if len(subsplit) < 2:
raise ValueError('Open ${ not followed by } before ' \
+ 'end of string or next ${ in "' \
+ string + '"')
varname = subsplit[0]
if varname == 'PYLEARN2_TRAIN_FILE_NAME':
warnings.warn("PYLEARN2_TRAIN_FILE_NAME is deprecated and may be "
"removed from the library on or after Oct 22, 2013. Switch"
" to PYLEARN2_TRAIN_FILE_FULL_STEM")
try:
val = os.environ[varname]
except KeyError:
if varname == 'PYLEARN2_DATA_PATH':
raise NoDataPathError()
if varname == 'PYLEARN2_VIEWER_COMMAND':
raise EnvironmentVariableError(environment_variable_essay)
raise ValueError('Unrecognized environment variable "' + varname
+ '". Did you mean ' + match(varname, os.environ.keys())
+ '?')
rval.append(val)
rval.append('}'.join(subsplit[1:]))
rval = ''.join(rval)
return rval | Preprocesses a string, by replacing ${VARNAME} with
os.environ['VARNAME']
Parameters
----------
string: the str object to preprocess
Returns
-------
the preprocessed string | Below is the the instruction that describes the task:
### Input:
Preprocesses a string, by replacing ${VARNAME} with
os.environ['VARNAME']
Parameters
----------
string: the str object to preprocess
Returns
-------
the preprocessed string
### Response:
def preprocess(string):
"""
Preprocesses a string, by replacing ${VARNAME} with
os.environ['VARNAME']
Parameters
----------
string: the str object to preprocess
Returns
-------
the preprocessed string
"""
split = string.split('${')
rval = [split[0]]
for candidate in split[1:]:
subsplit = candidate.split('}')
if len(subsplit) < 2:
raise ValueError('Open ${ not followed by } before ' \
+ 'end of string or next ${ in "' \
+ string + '"')
varname = subsplit[0]
if varname == 'PYLEARN2_TRAIN_FILE_NAME':
warnings.warn("PYLEARN2_TRAIN_FILE_NAME is deprecated and may be "
"removed from the library on or after Oct 22, 2013. Switch"
" to PYLEARN2_TRAIN_FILE_FULL_STEM")
try:
val = os.environ[varname]
except KeyError:
if varname == 'PYLEARN2_DATA_PATH':
raise NoDataPathError()
if varname == 'PYLEARN2_VIEWER_COMMAND':
raise EnvironmentVariableError(environment_variable_essay)
raise ValueError('Unrecognized environment variable "' + varname
+ '". Did you mean ' + match(varname, os.environ.keys())
+ '?')
rval.append(val)
rval.append('}'.join(subsplit[1:]))
rval = ''.join(rval)
return rval |
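A usage sketch for the happy path, assuming preprocess is importable from the module above; the variable name and value are set locally so the example does not depend on the Pylearn2 environment variables.

import os

os.environ["MY_DATA_DIR"] = "/tmp/data"        # illustrative value
print(preprocess("${MY_DATA_DIR}/train.csv"))  # -> /tmp/data/train.csv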
def assign(self, droplet_id):
"""
Assign a FloatingIP to a Droplet.
Args:
droplet_id: int - droplet id
"""
return self.get_data(
"floating_ips/%s/actions/" % self.ip,
type=POST,
params={"type": "assign", "droplet_id": droplet_id}
) | Assign a FloatingIP to a Droplet.
Args:
droplet_id: int - droplet id | Below is the the instruction that describes the task:
### Input:
Assign a FloatingIP to a Droplet.
Args:
droplet_id: int - droplet id
### Response:
def assign(self, droplet_id):
"""
Assign a FloatingIP to a Droplet.
Args:
droplet_id: int - droplet id
"""
return self.get_data(
"floating_ips/%s/actions/" % self.ip,
type=POST,
params={"type": "assign", "droplet_id": droplet_id}
) |
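A hedged usage sketch with the python-digitalocean client; the token, IP address, and droplet are placeholders, and the constructor details should be checked against the installed library version.

import digitalocean

manager = digitalocean.Manager(token="YOUR_API_TOKEN")    # placeholder token
floating_ip = digitalocean.FloatingIP(token="YOUR_API_TOKEN",
                                      ip="203.0.113.10")  # placeholder IP
droplet = manager.get_all_droplets()[0]
floating_ip.assign(droplet_id=droplet.id)                 # the action defined above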
def generate_about_table(extra_info={}):
"""
Make a table with information about FlowCal and the current analysis.
Parameters
----------
extra_info : dict, optional
Additional keyword:value pairs to include in the table.
Returns
-------
about_table : DataFrame
Table with information about FlowCal and the current analysis, as
keyword:value pairs. The following keywords are included: FlowCal
version, and date and time of analysis. Keywords and values from
`extra_info` are also included.
"""
# Make keyword and value arrays
keywords = []
values = []
# FlowCal version
keywords.append('FlowCal version')
values.append(FlowCal.__version__)
# Analysis date and time
keywords.append('Date of analysis')
values.append(time.strftime("%Y/%m/%d"))
keywords.append('Time of analysis')
values.append(time.strftime("%I:%M:%S%p"))
# Add additional keyword:value pairs
for k, v in six.iteritems(extra_info):
keywords.append(k)
values.append(v)
# Make table as data frame
about_table = pd.DataFrame(values, index=keywords)
# Set column names
about_table.columns = ['Value']
about_table.index.name = 'Keyword'
return about_table | Make a table with information about FlowCal and the current analysis.
Parameters
----------
extra_info : dict, optional
Additional keyword:value pairs to include in the table.
Returns
-------
about_table : DataFrame
Table with information about FlowCal and the current analysis, as
keyword:value pairs. The following keywords are included: FlowCal
version, and date and time of analysis. Keywords and values from
`extra_info` are also included. | Below is the the instruction that describes the task:
### Input:
Make a table with information about FlowCal and the current analysis.
Parameters
----------
extra_info : dict, optional
Additional keyword:value pairs to include in the table.
Returns
-------
about_table : DataFrame
Table with information about FlowCal and the current analysis, as
keyword:value pairs. The following keywords are included: FlowCal
version, and date and time of analysis. Keywords and values from
`extra_info` are also included.
### Response:
def generate_about_table(extra_info={}):
"""
Make a table with information about FlowCal and the current analysis.
Parameters
----------
extra_info : dict, optional
Additional keyword:value pairs to include in the table.
Returns
-------
about_table : DataFrame
Table with information about FlowCal and the current analysis, as
keyword:value pairs. The following keywords are included: FlowCal
version, and date and time of analysis. Keywords and values from
`extra_info` are also included.
"""
# Make keyword and value arrays
keywords = []
values = []
# FlowCal version
keywords.append('FlowCal version')
values.append(FlowCal.__version__)
# Analysis date and time
keywords.append('Date of analysis')
values.append(time.strftime("%Y/%m/%d"))
keywords.append('Time of analysis')
values.append(time.strftime("%I:%M:%S%p"))
# Add additional keyword:value pairs
for k, v in six.iteritems(extra_info):
keywords.append(k)
values.append(v)
# Make table as data frame
about_table = pd.DataFrame(values, index=keywords)
# Set column names
about_table.columns = ['Value']
about_table.index.name = 'Keyword'
return about_table |
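A usage sketch, assuming FlowCal and its pandas dependency are installed and the function above is importable; the extra keyword/value pairs are illustrative.

extra = {
    "FCS file": "sample_001.fcs",           # illustrative entry
    "Beads file": "calibration_beads.fcs",  # illustrative entry
}
about = generate_about_table(extra_info=extra)
print(about.to_string())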
def remove_label(self, name):
"""Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool
"""
url = self._build_url('labels', name, base_url=self._api)
# Docs say it should be a list of strings returned, practice says it
    # is just a 204/404 response. I'm tentatively changing this until I
# hear back from Support.
return self._boolean(self._delete(url), 204, 404) | Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool | Below is the the instruction that describes the task:
### Input:
Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool
### Response:
def remove_label(self, name):
"""Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool
"""
url = self._build_url('labels', name, base_url=self._api)
# Docs say it should be a list of strings returned, practice says it
    # is just a 204/404 response. I'm tentatively changing this until I
# hear back from Support.
return self._boolean(self._delete(url), 204, 404) |
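A hedged usage sketch with github3.py; the token, repository, issue number, and label name are placeholders.

import github3

gh = github3.login(token="YOUR_GITHUB_TOKEN")   # placeholder token
issue = gh.issue("octocat", "hello-world", 42)  # placeholder issue
if issue.remove_label("wontfix"):               # the method defined above
    print("label removed")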
def _draw_placeholder(self):
"""To be used in QTreeView"""
if self.model().rowCount() == 0:
painter = QPainter(self.viewport())
painter.setFont(_custom_font(is_italic=True))
painter.drawText(self.rect().adjusted(0, 0, -5, -5), Qt.AlignCenter | Qt.TextWordWrap,
self.PLACEHOLDER) | To be used in QTreeView | Below is the the instruction that describes the task:
### Input:
To be used in QTreeView
### Response:
def _draw_placeholder(self):
"""To be used in QTreeView"""
if self.model().rowCount() == 0:
painter = QPainter(self.viewport())
painter.setFont(_custom_font(is_italic=True))
painter.drawText(self.rect().adjusted(0, 0, -5, -5), Qt.AlignCenter | Qt.TextWordWrap,
self.PLACEHOLDER) |
def get_file_path(self, digest):
"""Retrieve the absolute path to the file with the given digest
Args:
digest -- digest of the file
Returns:
            String representing the absolute path of the file
"""
relPath = Fsdb.generate_tree_path(digest, self._conf['depth'])
return os.path.join(self.fsdbRoot, relPath) | Retrieve the absolute path to the file with the given digest
Args:
digest -- digest of the file
Returns:
            String representing the absolute path of the file | Below is the the instruction that describes the task:
### Input:
Retrieve the absolute path to the file with the given digest
Args:
digest -- digest of the file
Returns:
        String representing the absolute path of the file
### Response:
def get_file_path(self, digest):
"""Retrieve the absolute path to the file with the given digest
Args:
digest -- digest of the file
Returns:
            String representing the absolute path of the file
"""
relPath = Fsdb.generate_tree_path(digest, self._conf['depth'])
return os.path.join(self.fsdbRoot, relPath) |
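Fsdb.generate_tree_path is not shown in this entry; the sketch below assumes the common sharding scheme of one directory level per fixed-width slice of the digest, which may differ from the library's actual layout.

import hashlib
import os

def tree_path(digest, depth=2, width=2):
    # assumed scheme: `depth` directory levels, each `width` hex characters wide
    parts = [digest[i * width:(i + 1) * width] for i in range(depth)]
    return os.path.join(*parts, digest)

digest = hashlib.sha1(b"hello").hexdigest()
print(os.path.join("/var/fsdb", tree_path(digest)))
# e.g. /var/fsdb/aa/f4/aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d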
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first object of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case insensitive.
| If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object().
| If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find.
"""
# Call regular find() if USER_IFIND_MODE is nocase_collation
if self.user_manager.USER_IFIND_MODE=='nocase_collation':
return self.find_first_object(ObjectClass, **kwargs)
# Convert each name/value pair in 'kwargs' into a filter
query = ObjectClass.query
for field_name, field_value in kwargs.items():
# Make sure that ObjectClass has a 'field_name' property
field = getattr(ObjectClass, field_name, None)
if field is None:
raise KeyError("BaseAlchemyAdapter.find_first_object(): Class '%s' has no field '%s'." % (ObjectClass, field_name))
            # Add a case-insensitive filter to the query
query = query.filter(field.ifind(field_value)) # case insensitive!!
# Execute query
return query.first() | Retrieve the first object of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case insensitive.
| If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object().
| If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find. | Below is the the instruction that describes the task:
### Input:
Retrieve the first object of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case insensitive.
| If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object().
| If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find.
### Response:
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first object of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case insensitive.
| If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object().
| If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find.
"""
# Call regular find() if USER_IFIND_MODE is nocase_collation
if self.user_manager.USER_IFIND_MODE=='nocase_collation':
return self.find_first_object(ObjectClass, **kwargs)
# Convert each name/value pair in 'kwargs' into a filter
query = ObjectClass.query
for field_name, field_value in kwargs.items():
# Make sure that ObjectClass has a 'field_name' property
field = getattr(ObjectClass, field_name, None)
if field is None:
raise KeyError("BaseAlchemyAdapter.find_first_object(): Class '%s' has no field '%s'." % (ObjectClass, field_name))
            # Add a case-insensitive filter to the query
query = query.filter(field.ifind(field_value)) # case insensitive!!
# Execute query
return query.first() |
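A hypothetical call site; db_adapter and the User model come from the surrounding Flask-User / SQLAlchemy setup and are not defined in this entry, and the email column is assumed to support the ifind filter used above.

# hypothetical usage -- `db_adapter` and `User` are assumed to exist elsewhere
user = db_adapter.ifind_first_object(User, email="[email protected]")
if user is None:
    print("no matching user")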
def errors(self):
"""
Returns a TupleErrorList for this field. This overloaded method adds additional error lists
to the errors as detected by the form validator.
"""
if not hasattr(self, '_errors_cache'):
self._errors_cache = self.form.get_field_errors(self)
return self._errors_cache | Returns a TupleErrorList for this field. This overloaded method adds additional error lists
to the errors as detected by the form validator. | Below is the the instruction that describes the task:
### Input:
Returns a TupleErrorList for this field. This overloaded method adds additional error lists
to the errors as detected by the form validator.
### Response:
def errors(self):
"""
Returns a TupleErrorList for this field. This overloaded method adds additional error lists
to the errors as detected by the form validator.
"""
if not hasattr(self, '_errors_cache'):
self._errors_cache = self.form.get_field_errors(self)
return self._errors_cache |
def request_data(self):
    '''Call this function to fetch the data before calling dialog.run().'''
def on_tasks_received(info, error=None):
if error or not info:
logger.error('BTBrowserDialog.on_tasks_received: %s, %s.' %
(info, error))
return
if 'magnet_info' in info:
tasks = info['magnet_info']
elif 'torrent_info' in info:
tasks = info['torrent_info']['file_info']
self.file_sha1 = info['torrent_info']['sha1']
elif 'error_code' in info:
logger.error('BTBrowserDialog.on_tasks_received: %s, %s.' %
(info, error))
self.app.toast(info.get('error_msg', ''))
return
else:
logger.error('BTBrowserDialog.on_tasks_received: %s, %s.' %
(info, error))
self.app.toast(_('Unknown error occured: %s') % info)
return
for task in tasks:
size = int(task['size'])
human_size = util.get_human_size(size)[0]
select = (size > MIN_SIZE_TO_CHECK or
task['file_name'].endswith(CHECK_EXT))
self.liststore.append([
select,
task['file_name'],
size,
human_size,
])
if self.source_url.startswith('magnet'):
gutil.async_call(pcs.cloud_query_magnetinfo, self.app.cookie,
self.app.tokens, self.source_url, self.save_path,
callback=on_tasks_received)
else:
gutil.async_call(pcs.cloud_query_sinfo, self.app.cookie,
self.app.tokens, self.source_url,
                         callback=on_tasks_received) | Call this function to fetch the data before calling dialog.run(). | Below is the the instruction that describes the task:
### Input:
Call this function to fetch the data before calling dialog.run().
### Response:
def request_data(self):
    '''Call this function to fetch the data before calling dialog.run().'''
def on_tasks_received(info, error=None):
if error or not info:
logger.error('BTBrowserDialog.on_tasks_received: %s, %s.' %
(info, error))
return
if 'magnet_info' in info:
tasks = info['magnet_info']
elif 'torrent_info' in info:
tasks = info['torrent_info']['file_info']
self.file_sha1 = info['torrent_info']['sha1']
elif 'error_code' in info:
logger.error('BTBrowserDialog.on_tasks_received: %s, %s.' %
(info, error))
self.app.toast(info.get('error_msg', ''))
return
else:
logger.error('BTBrowserDialog.on_tasks_received: %s, %s.' %
(info, error))
self.app.toast(_('Unknown error occured: %s') % info)
return
for task in tasks:
size = int(task['size'])
human_size = util.get_human_size(size)[0]
select = (size > MIN_SIZE_TO_CHECK or
task['file_name'].endswith(CHECK_EXT))
self.liststore.append([
select,
task['file_name'],
size,
human_size,
])
if self.source_url.startswith('magnet'):
gutil.async_call(pcs.cloud_query_magnetinfo, self.app.cookie,
self.app.tokens, self.source_url, self.save_path,
callback=on_tasks_received)
else:
gutil.async_call(pcs.cloud_query_sinfo, self.app.cookie,
self.app.tokens, self.source_url,
callback=on_tasks_received) |
def compile(self, csdl):
""" Compile the given CSDL.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile
Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation.
:param csdl: CSDL to compile
:type csdl: str
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
return self.request.post('compile', data=dict(csdl=csdl)) | Compile the given CSDL.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile
Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation.
:param csdl: CSDL to compile
:type csdl: str
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` | Below is the the instruction that describes the task:
### Input:
Compile the given CSDL.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile
Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation.
:param csdl: CSDL to compile
:type csdl: str
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
### Response:
def compile(self, csdl):
""" Compile the given CSDL.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile
Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation.
:param csdl: CSDL to compile
:type csdl: str
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
return self.request.post('compile', data=dict(csdl=csdl)) |
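A hedged usage sketch with the DataSift Python client; the credentials are placeholders, the CSDL is a minimal filter, and the exact response keys should be checked against the API documentation.

from datasift import Client

client = Client("your_username", "your_api_key")  # placeholder credentials
compiled = client.compile('interaction.content contains "python"')
print(compiled.get("hash"))                       # compiled stream hash, if returned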
def write(self, values):
"""
Write values to the targeted documents
Values need to be a dict as : {document_id: value}
"""
# Insert only for docs targeted by the target
filtered = {_id: value for _id, value in values.items() if _id in self._document_ids}
if not filtered:
return
bulk = self.get_collection().initialize_ordered_bulk_op()
for _id, value in filtered.items():
bulk.find({'_id': _id}).upsert() \
.update_one({'$set': {self._field: value}})
bulk.execute() | Write values to the targeted documents
Values need to be a dict as : {document_id: value} | Below is the the instruction that describes the task:
### Input:
Write values to the targeted documents
Values need to be a dict as : {document_id: value}
### Response:
def write(self, values):
"""
Write values to the targeted documents
Values need to be a dict as : {document_id: value}
"""
# Insert only for docs targeted by the target
filtered = {_id: value for _id, value in values.items() if _id in self._document_ids}
if not filtered:
return
bulk = self.get_collection().initialize_ordered_bulk_op()
for _id, value in filtered.items():
bulk.find({'_id': _id}).upsert() \
.update_one({'$set': {self._field: value}})
bulk.execute() |
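initialize_ordered_bulk_op was deprecated and later removed from pymongo; a sketch of the same per-document upsert with bulk_write, using a placeholder collection, field, and value mapping.

from pymongo import MongoClient, UpdateOne

collection = MongoClient()["example_db"]["example_collection"]  # placeholder target
field = "score"
values = {1: 0.25, 2: 0.75}   # {document_id: value}
document_ids = {1, 2, 3}      # ids this writer is allowed to touch

requests = [
    UpdateOne({"_id": _id}, {"$set": {field: value}}, upsert=True)
    for _id, value in values.items()
    if _id in document_ids
]
if requests:
    collection.bulk_write(requests, ordered=True)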
def find_output_without_tag(self, tag):
"""
Find all files who do not have tag in self.tags
"""
# Enforce upper case
tag = tag.upper()
return FileList([i for i in self if tag not in i.tags]) | Find all files who do not have tag in self.tags | Below is the the instruction that describes the task:
### Input:
Find all files who do not have tag in self.tags
### Response:
def find_output_without_tag(self, tag):
"""
Find all files who do not have tag in self.tags
"""
# Enforce upper case
tag = tag.upper()
return FileList([i for i in self if tag not in i.tags]) |
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options) | Equals :meth:`route` with a ``DELETE`` method parameter. | Below is the the instruction that describes the task:
### Input:
Equals :meth:`route` with a ``DELETE`` method parameter.
### Response:
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options) |
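This shortcut matches Bottle's route helpers; a small sketch of how such a delete() decorator is typically used, with an illustrative URL and handler.

from bottle import Bottle

app = Bottle()

@app.delete("/items/<item_id:int>")
def remove_item(item_id):
    # illustrative handler; a real application would delete the record here
    return {"deleted": item_id}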