code stringlengths 75-104k | docstring stringlengths 1-46.9k | text stringlengths 164-112k |
---|---|---|
def load_messages(self, directory, catalogue):
"""
Loads translation found in a directory.
@type directory: string
@param directory: The directory to search
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@raises: ValueError
"""
if not os.path.isdir(directory):
raise ValueError("{0} is not a directory".format(directory))
for format, loader in list(self.loaders.items()):
extension = "{0}.{1}".format(catalogue.locale, format)
files = find_files(directory, "*.{0}".format(extension))
for file in files:
domain = file.split("/")[-1][:-1 * len(extension) - 1]
catalogue.add_catalogue(
loader.load(
file,
catalogue.locale,
domain)) | Loads translation found in a directory.
@type directory: string
@param directory: The directory to search
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@raises: ValueError | Below is the instruction that describes the task:
### Input:
Loads translation found in a directory.
@type directory: string
@param directory: The directory to search
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@raises: ValueError
### Response:
def load_messages(self, directory, catalogue):
"""
Loads translation found in a directory.
@type directory: string
@param directory: The directory to search
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@raises: ValueError
"""
if not os.path.isdir(directory):
raise ValueError("{0} is not a directory".format(directory))
for format, loader in list(self.loaders.items()):
extension = "{0}.{1}".format(catalogue.locale, format)
files = find_files(directory, "*.{0}".format(extension))
for file in files:
domain = file.split("/")[-1][:-1 * len(extension) - 1]
catalogue.add_catalogue(
loader.load(
file,
catalogue.locale,
domain)) |
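A quick standalone check of the domain-extraction slice used in load_messages above (the file name and locale are hypothetical; only the string handling from the shown code is exercised):

# For a file "messages.en.yml", locale "en" and loader format "yml",
# the slice strips ".<locale>.<format>" from the basename to recover the domain.
extension = "{0}.{1}".format("en", "yml")              # -> "en.yml"
file = "/app/translations/messages.en.yml"
domain = file.split("/")[-1][:-1 * len(extension) - 1]
print(domain)                                          # -> "messages"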
def directory_is_present(self, directory_path):
"""
check if directory 'directory_path' is present, raise IOError if it's not a directory
:param directory_path: str, directory to check
:return: True if directory exists, False if directory does not exist
"""
p = self.p(directory_path)
if not os.path.exists(p):
return False
if not os.path.isdir(p):
raise IOError("%s is not a directory" % directory_path)
return True | check if directory 'directory_path' is present, raise IOError if it's not a directory
:param directory_path: str, directory to check
:return: True if directory exists, False if directory does not exist | Below is the instruction that describes the task:
### Input:
check if directory 'directory_path' is present, raise IOError if it's not a directory
:param directory_path: str, directory to check
:return: True if directory exists, False if directory does not exist
### Response:
def directory_is_present(self, directory_path):
"""
check if directory 'directory_path' is present, raise IOError if it's not a directory
:param directory_path: str, directory to check
:return: True if directory exists, False if directory does not exist
"""
p = self.p(directory_path)
if not os.path.exists(p):
return False
if not os.path.isdir(p):
raise IOError("%s is not a directory" % directory_path)
return True |
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE | Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024. | Below is the instruction that describes the task:
### Input:
Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
### Response:
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE |
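A minimal standalone sketch of the same round-up arithmetic; BLOCKSIZE = 512 is an assumption (the tarfile-style block size, which matches the _block(834) => 1024 example in the docstring):

BLOCKSIZE = 512  # assumed; the real module-level constant is not shown here

def block(count):
    # Round count up to the next multiple of BLOCKSIZE.
    blocks, remainder = divmod(count, BLOCKSIZE)
    if remainder:
        blocks += 1
    return blocks * BLOCKSIZE

print(block(834))   # 1024: one full block plus 322 leftover bytes -> 2 blocks
print(block(1024))  # 1024: already an exact multiple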
def tdSensor(self):
"""Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
"""
protocol = create_string_buffer(20)
model = create_string_buffer(20)
sid = c_int()
datatypes = c_int()
self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model),
byref(sid), byref(datatypes))
return {'protocol': self._to_str(protocol),
'model': self._to_str(model),
'id': sid.value, 'datatypes': datatypes.value} | Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes. | Below is the instruction that describes the task:
### Input:
Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
### Response:
def tdSensor(self):
"""Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
"""
protocol = create_string_buffer(20)
model = create_string_buffer(20)
sid = c_int()
datatypes = c_int()
self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model),
byref(sid), byref(datatypes))
return {'protocol': self._to_str(protocol),
'model': self._to_str(model),
'id': sid.value, 'datatypes': datatypes.value} |
def describe_directory(self, path):
"""
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str
"""
conn = self.get_conn()
flist = conn.listdir_attr(path)
files = {}
for f in flist:
modify = datetime.datetime.fromtimestamp(
f.st_mtime).strftime('%Y%m%d%H%M%S')
files[f.filename] = {
'size': f.st_size,
'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
'modify': modify}
return files | Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str | Below is the instruction that describes the task:
### Input:
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str
### Response:
def describe_directory(self, path):
"""
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str
"""
conn = self.get_conn()
flist = conn.listdir_attr(path)
files = {}
for f in flist:
modify = datetime.datetime.fromtimestamp(
f.st_mtime).strftime('%Y%m%d%H%M%S')
files[f.filename] = {
'size': f.st_size,
'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
'modify': modify}
return files |
def contains (self, point):
"""contains(point) -> True | False
Returns True if point is contained inside this Rectangle, False otherwise.
Examples:
>>> r = Rect( Point(-1, -1), Point(1, 1) )
>>> r.contains( Point(0, 0) )
True
>>> r.contains( Point(2, 3) )
False
"""
return (point.x >= self.ul.x and point.x <= self.lr.x) and \
(point.y >= self.ul.y and point.y <= self.lr.y) | contains(point) -> True | False
Returns True if point is contained inside this Rectangle, False otherwise.
Examples:
>>> r = Rect( Point(-1, -1), Point(1, 1) )
>>> r.contains( Point(0, 0) )
True
>>> r.contains( Point(2, 3) )
False | Below is the instruction that describes the task:
### Input:
contains(point) -> True | False
Returns True if point is contained inside this Rectangle, False otherwise.
Examples:
>>> r = Rect( Point(-1, -1), Point(1, 1) )
>>> r.contains( Point(0, 0) )
True
>>> r.contains( Point(2, 3) )
False
### Response:
def contains (self, point):
"""contains(point) -> True | False
Returns True if point is contained inside this Rectangle, False otherwise.
Examples:
>>> r = Rect( Point(-1, -1), Point(1, 1) )
>>> r.contains( Point(0, 0) )
True
>>> r.contains( Point(2, 3) )
False
"""
return (point.x >= self.ul.x and point.x <= self.lr.x) and \
(point.y >= self.ul.y and point.y <= self.lr.y) |
def add(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(update) - if == 'yes' and entry exists, update it
(dict) - meta description to record
(substitute) - if 'yes' and update=='yes' substitute dictionaries, otherwise merge!
(desc) - description of an entry (gradually adding API description in flat format)
(extra_json_files) - dict with extra json files to save to entry (key is a filename)
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(extra_info) - enforce extra info such as
author
author_email
author_webpage
license
copyright
If not specified then taken from kernel (prefix 'default_')
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
(unlock_uid) - unlock UID if was previously locked
(sort_keys) - by default, 'yes'
(share) - if 'yes', try to add via GIT
}
Output: {
return - return code = 0, if successful
16, if entry already exists
> 0, if error
(error) - error text if return > 0
Output from the 'create_entry' function
}
"""
o=i.get('out','')
t='added'
ra=i.get('repo_uoa','')
m=i.get('module_uoa','')
d=i.get('data_uoa','')
di=i.get('data_uid','')
dn=i.get('data_name','')
uuid=i.get('unlock_uid','')
up=i.get('update','')
ask=i.get('ask','')
# Get repo path
r=find_path_to_repo({'repo_uoa':ra})
if r['return']>0: return r
pr=r['path']
ruoa=r['repo_uoa']
ruid=r['repo_uid']
ralias=r['repo_alias']
rd=r['dict']
rshared=rd.get('shared','')
rsync=rd.get('sync','')
if i.get('share','')=='yes': rsync='yes'
# Check if writing is allowed
ii={'module_uoa':m, 'repo_uoa':r['repo_uoa'], 'repo_uid':r['repo_uid'], 'repo_dict':rd}
r=check_writing(ii)
if r['return']>0: return r
# Load info about module
r=load({'module_uoa':cfg['module_name'],
'data_uoa':m})
if r['return']>0: return r
elif r['return']==16:
return {'return':8, 'error':'can\'t find path to module "'+m+'"'}
muoa=r['data_uoa']
muid=r['data_uid']
malias=r['data_alias']
pm=r['path']
uid=r['data_uid']
alias=r['data_alias']
if alias=='': alias=uid
module_desc=r['dict']
# Ask additional questions
if o=='con' and ask=='yes':
# Asking for alias
if d=='' or is_uid(d):
r=inp({'text':'Enter an alias (or Enter to skip it): '})
d=r['string']
# Asking for user-friendly name
if dn=='' and up!='yes':
r=inp({'text':'Enter a user-friendly name of this entry (or Enter to reuse alias): '})
dn=r['string']
# Load dictionary from other entry if needed
dfcid=i.get('dict_from_cid','')
dfruoa=i.get('dict_from_repo_uoa','')
dfmuoa=i.get('dict_from_module_uoa','')
dfduoa=i.get('dict_from_data_uoa','')
if dfcid!='':
r=parse_cid({'cid':dfcid})
if r['return']>0: return r
dfruoa=r.get('repo_uoa','')
dfmuoa=r.get('module_uoa','')
dfduoa=r.get('data_uoa','')
if d!='' and not is_uoa(d):
return {'return':1, 'error':'alias has disallowed characters'}
if dfduoa!='':
if dfmuoa=='': dfmuoa=m
ii={'module_uoa':dfmuoa, 'data_uoa':dfduoa}
if dfruoa!='': ii['repo_uoa']=dfruoa
r=load(ii)
if r['return']>0: return r
df=r.get('dict',{})
# Create first level entry (module)
r=create_entry({'path':pr, 'data_uoa':alias, 'data_uid':uid})
if r['return']>0 and r['return']!=16: return r
p1=r['path']
# Create second level entry (data)
i1={'path':p1}
pdd=''
if di!='':
i1['data_uid']=di
if d!='':
i1['data_uoa']=d
rr=create_entry(i1)
if rr['return']>0 and rr['return']!=16: return rr
duid=rr['data_uid']
pdd=rr['data_uoa']
dalias=rr['data_alias']
# Preparing meta-description
a={}
info={}
updates={}
desc={}
p2=rr['path']
p3=os.path.join(p2, cfg['subdir_ck_ext'])
p4=os.path.join(p3, cfg['file_meta'])
p4i=os.path.join(p3, cfg['file_info'])
p4u=os.path.join(p3, cfg['file_updates'])
p4d=os.path.join(p3, cfg['file_desc'])
# If last entry exists
if rr['return']==16:
if up=='yes':
t='updated'
# Check if locked
rl=check_lock({'path':p2, 'unlock_uid':uuid})
if rl['return']>0:
if rl['return']==32:
rl['data_uoa']=pdd
rl['data_uid']=duid
return rl
# Entry exists, load configuration if update
r2=load_meta_from_path({'path':p2})
if r2['return']>0: return r2
a=r2['dict']
info=r2.get('info',{})
updates=r2.get('updates',{})
desc=r2.get('desc',{})
if dn=='': dn=info.get('data_name','')
else:
return {'return':16,'error':'entry already exists in path ('+p2+')'}
else:
# Create configuration directory
if not os.path.isdir(p3):
try:
os.mkdir(p3)
except Exception as e:
return {'return':1, 'error':format(e)}
if dn=='' and not is_uid(d):
dn=d
if dfduoa!='':
r=merge_dicts({'dict1':a, 'dict2':df})
if r['return']>0: return r
# If dict, info and updates are in input, try to merge ...
cma=i.get('dict',{})
cmad=i.get('desc',{})
if i.get('substitute','')=='yes':
a=cma
desc=cmad
else:
r=merge_dicts({'dict1':a, 'dict2':cma})
if r['return']>0: return r
r=merge_dicts({'dict1':desc, 'dict2':cmad})
if r['return']>0: return r
# Check tags
xtags=a.get('tags',[])
tags=i.get('tags','')
if tags=='': tags=[]
elif type(tags)!=list:
tags=tags.split(',')
for l in range(0,len(tags)):
ll=tags[l].strip()
if ll not in xtags:
xtags.append(ll)
if len(xtags)>0:
a['tags']=xtags
# Process info
cminfo=i.get('info',{})
if len(cminfo)!=0:
info=cminfo
# r=merge_dicts({'dict1':info, 'dict2':cminfo})
# if r['return']>0: return r
cmupdates=i.get('updates',{})
if len(cmupdates)!=0:
updates=cmupdates
# r=merge_dicts({'dict1':updates, 'dict2':cmupdates})
# if r['return']>0: return r
# If name exists, add
info['backup_module_uoa']=muoa
info['backup_module_uid']=muid
info['backup_data_uid']=duid
if dn!='': info['data_name']=dn
# Add control info
ri=prepare_special_info_about_entry({})
if ri['return']>0: return ri
x=ri['dict']
# Check if pre-set control params such as author, copyright, license
ei=i.get('extra_info',{})
if len(ei)!=0: x.update(ei)
y=info.get('control',{})
if i.get('ignore_update','')!='yes':
if len(y)==0:
info['control']=x
else:
y=updates.get('control',[])
y.append(x)
updates['control']=y
sk=i.get('sort_keys','')
if sk=='': sk='yes'
if len(updates)>0:
# Record updates
rx=save_json_to_file({'json_file':p4u, 'dict':updates, 'sort_keys':sk})
if rx['return']>0: return rx
# Record meta description
rx=save_json_to_file({'json_file':p4, 'dict':a, 'sort_keys':sk})
if rx['return']>0: return rx
# Record info
rx=save_json_to_file({'json_file':p4i, 'dict':info, 'sort_keys':sk})
if rx['return']>0: return rx
# Record desc
rx=save_json_to_file({'json_file':p4d, 'dict':desc, 'sort_keys':sk})
if rx['return']>0: return rx
# Record extra files if there
ejf=i.get('extra_json_files',{})
if len(ejf)>0:
for ff in ejf:
dff=ejf[ff]
rz=save_json_to_file({'json_file':os.path.join(p2,ff), 'dict':dff, 'sort_keys':sk})
if rz['return']>0: return rz
if o=='con':
out('Entry '+d+' ('+duid+', '+p2+') '+t+' successfully!')
# Check if needs to be synced
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
os.chdir(pr)
if os.path.isdir(cfg['subdir_ck_ext']):
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', cfg['subdir_ck_ext'])
rx=os.system(ss)
os.chdir(p1)
if os.path.isdir(cfg['subdir_ck_ext']):
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', cfg['subdir_ck_ext'])
rx=os.system(ss)
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', pdd)
rx=os.system(ss)
os.chdir(ppp)
# Prepare output
rr={'return':0,
'dict': a,
'info': info,
'updates': updates,
'path':p2,
'path_module': pm,
'path_repo': pr,
'repo_uoa':ruoa,
'repo_uid':ruid,
'repo_alias':ralias,
'module_uoa':muoa,
'module_uid':muid,
'module_alias':malias,
'data_uoa':pdd,
'data_uid':duid,
'data_alias':dalias,
'data_name':dn}
# Check if need to add index
if cfg.get('use_indexing','')=='yes':
muid=rr['module_uid']
duid=rr['data_uid']
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rr})
if ri['return']>0: return ri
# Remove lock after update if needed
if uuid!='':
pl=os.path.join(p2, cfg['subdir_ck_ext'], cfg['file_for_lock'])
if os.path.isfile(pl): os.remove(pl)
rr['return']=0
return rr | Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(update) - if == 'yes' and entry exists, update it
(dict) - meta description to record
(substitute) - if 'yes' and update=='yes' substitute dictionaries, otherwise merge!
(desc) - description of an entry (gradually adding API description in flat format)
(extra_json_files) - dict with extra json files to save to entry (key is a filename)
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(extra_info) - enforce extra info such as
author
author_email
author_webpage
license
copyright
If not specified then taken from kernel (prefix 'default_')
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
(unlock_uid) - unlock UID if was previously locked
(sort_keys) - by default, 'yes'
(share) - if 'yes', try to add via GIT
}
Output: {
return - return code = 0, if successful
16, if entry already exists
> 0, if error
(error) - error text if return > 0
Output from the 'create_entry' function
} | Below is the instruction that describes the task:
### Input:
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(update) - if == 'yes' and entry exists, update it
(dict) - meta description to record
(substitute) - if 'yes' and update=='yes' substitute dictionaries, otherwise merge!
(desc) - description of an entry (gradually adding API description in flat format)
(extra_json_files) - dict with extra json files to save to entry (key is a filename)
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(extra_info) - enforce extra info such as
author
author_email
author_webpage
license
copyright
If not specified then taken from kernel (prefix 'default_')
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
(unlock_uid) - unlock UID if was previously locked
(sort_keys) - by default, 'yes'
(share) - if 'yes', try to add via GIT
}
Output: {
return - return code = 0, if successful
16, if entry already exists
> 0, if error
(error) - error text if return > 0
Output from the 'create_entry' function
}
### Response:
def add(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(update) - if == 'yes' and entry exists, update it
(dict) - meta description to record
(substitute) - if 'yes' and update=='yes' substitute dictionaries, otherwise merge!
(desc) - description of an entry (gradually adding API description in flat format)
(extra_json_files) - dict with extra json files to save to entry (key is a filename)
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(extra_info) - enforce extra info such as
author
author_email
author_webpage
license
copyright
If not specified then taken from kernel (prefix 'default_')
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
(unlock_uid) - unlock UID if was previously locked
(sort_keys) - by default, 'yes'
(share) - if 'yes', try to add via GIT
}
Output: {
return - return code = 0, if successful
16, if entry already exists
> 0, if error
(error) - error text if return > 0
Output from the 'create_entry' function
}
"""
o=i.get('out','')
t='added'
ra=i.get('repo_uoa','')
m=i.get('module_uoa','')
d=i.get('data_uoa','')
di=i.get('data_uid','')
dn=i.get('data_name','')
uuid=i.get('unlock_uid','')
up=i.get('update','')
ask=i.get('ask','')
# Get repo path
r=find_path_to_repo({'repo_uoa':ra})
if r['return']>0: return r
pr=r['path']
ruoa=r['repo_uoa']
ruid=r['repo_uid']
ralias=r['repo_alias']
rd=r['dict']
rshared=rd.get('shared','')
rsync=rd.get('sync','')
if i.get('share','')=='yes': rsync='yes'
# Check if writing is allowed
ii={'module_uoa':m, 'repo_uoa':r['repo_uoa'], 'repo_uid':r['repo_uid'], 'repo_dict':rd}
r=check_writing(ii)
if r['return']>0: return r
# Load info about module
r=load({'module_uoa':cfg['module_name'],
'data_uoa':m})
if r['return']>0: return r
elif r['return']==16:
return {'return':8, 'error':'can\'t find path to module "'+m+'"'}
muoa=r['data_uoa']
muid=r['data_uid']
malias=r['data_alias']
pm=r['path']
uid=r['data_uid']
alias=r['data_alias']
if alias=='': alias=uid
module_desc=r['dict']
# Ask additional questions
if o=='con' and ask=='yes':
# Asking for alias
if d=='' or is_uid(d):
r=inp({'text':'Enter an alias (or Enter to skip it): '})
d=r['string']
# Asking for user-friendly name
if dn=='' and up!='yes':
r=inp({'text':'Enter a user-friendly name of this entry (or Enter to reuse alias): '})
dn=r['string']
# Load dictionary from other entry if needed
dfcid=i.get('dict_from_cid','')
dfruoa=i.get('dict_from_repo_uoa','')
dfmuoa=i.get('dict_from_module_uoa','')
dfduoa=i.get('dict_from_data_uoa','')
if dfcid!='':
r=parse_cid({'cid':dfcid})
if r['return']>0: return r
dfruoa=r.get('repo_uoa','')
dfmuoa=r.get('module_uoa','')
dfduoa=r.get('data_uoa','')
if d!='' and not is_uoa(d):
return {'return':1, 'error':'alias has disallowed characters'}
if dfduoa!='':
if dfmuoa=='': dfmuoa=m
ii={'module_uoa':dfmuoa, 'data_uoa':dfduoa}
if dfruoa!='': ii['repo_uoa']=dfruoa
r=load(ii)
if r['return']>0: return r
df=r.get('dict',{})
# Create first level entry (module)
r=create_entry({'path':pr, 'data_uoa':alias, 'data_uid':uid})
if r['return']>0 and r['return']!=16: return r
p1=r['path']
# Create second level entry (data)
i1={'path':p1}
pdd=''
if di!='':
i1['data_uid']=di
if d!='':
i1['data_uoa']=d
rr=create_entry(i1)
if rr['return']>0 and rr['return']!=16: return rr
duid=rr['data_uid']
pdd=rr['data_uoa']
dalias=rr['data_alias']
# Preparing meta-description
a={}
info={}
updates={}
desc={}
p2=rr['path']
p3=os.path.join(p2, cfg['subdir_ck_ext'])
p4=os.path.join(p3, cfg['file_meta'])
p4i=os.path.join(p3, cfg['file_info'])
p4u=os.path.join(p3, cfg['file_updates'])
p4d=os.path.join(p3, cfg['file_desc'])
# If last entry exists
if rr['return']==16:
if up=='yes':
t='updated'
# Check if locked
rl=check_lock({'path':p2, 'unlock_uid':uuid})
if rl['return']>0:
if rl['return']==32:
rl['data_uoa']=pdd
rl['data_uid']=duid
return rl
# Entry exists, load configuration if update
r2=load_meta_from_path({'path':p2})
if r2['return']>0: return r2
a=r2['dict']
info=r2.get('info',{})
updates=r2.get('updates',{})
desc=r2.get('desc',{})
if dn=='': dn=info.get('data_name','')
else:
return {'return':16,'error':'entry already exists in path ('+p2+')'}
else:
# Create configuration directory
if not os.path.isdir(p3):
try:
os.mkdir(p3)
except Exception as e:
return {'return':1, 'error':format(e)}
if dn=='' and not is_uid(d):
dn=d
if dfduoa!='':
r=merge_dicts({'dict1':a, 'dict2':df})
if r['return']>0: return r
# If dict, info and updates are in input, try to merge ...
cma=i.get('dict',{})
cmad=i.get('desc',{})
if i.get('substitute','')=='yes':
a=cma
desc=cmad
else:
r=merge_dicts({'dict1':a, 'dict2':cma})
if r['return']>0: return r
r=merge_dicts({'dict1':desc, 'dict2':cmad})
if r['return']>0: return r
# Check tags
xtags=a.get('tags',[])
tags=i.get('tags','')
if tags=='': tags=[]
elif type(tags)!=list:
tags=tags.split(',')
for l in range(0,len(tags)):
ll=tags[l].strip()
if ll not in xtags:
xtags.append(ll)
if len(xtags)>0:
a['tags']=xtags
# Process info
cminfo=i.get('info',{})
if len(cminfo)!=0:
info=cminfo
# r=merge_dicts({'dict1':info, 'dict2':cminfo})
# if r['return']>0: return r
cmupdates=i.get('updates',{})
if len(cmupdates)!=0:
updates=cmupdates
# r=merge_dicts({'dict1':updates, 'dict2':cmupdates})
# if r['return']>0: return r
# If name exists, add
info['backup_module_uoa']=muoa
info['backup_module_uid']=muid
info['backup_data_uid']=duid
if dn!='': info['data_name']=dn
# Add control info
ri=prepare_special_info_about_entry({})
if ri['return']>0: return ri
x=ri['dict']
# Check if pre-set control params such as author, copyright, license
ei=i.get('extra_info',{})
if len(ei)!=0: x.update(ei)
y=info.get('control',{})
if i.get('ignore_update','')!='yes':
if len(y)==0:
info['control']=x
else:
y=updates.get('control',[])
y.append(x)
updates['control']=y
sk=i.get('sort_keys','')
if sk=='': sk='yes'
if len(updates)>0:
# Record updates
rx=save_json_to_file({'json_file':p4u, 'dict':updates, 'sort_keys':sk})
if rx['return']>0: return rx
# Record meta description
rx=save_json_to_file({'json_file':p4, 'dict':a, 'sort_keys':sk})
if rx['return']>0: return rx
# Record info
rx=save_json_to_file({'json_file':p4i, 'dict':info, 'sort_keys':sk})
if rx['return']>0: return rx
# Record desc
rx=save_json_to_file({'json_file':p4d, 'dict':desc, 'sort_keys':sk})
if rx['return']>0: return rx
# Record extra files if there
ejf=i.get('extra_json_files',{})
if len(ejf)>0:
for ff in ejf:
dff=ejf[ff]
rz=save_json_to_file({'json_file':os.path.join(p2,ff), 'dict':dff, 'sort_keys':sk})
if rz['return']>0: return rz
if o=='con':
out('Entry '+d+' ('+duid+', '+p2+') '+t+' successfully!')
# Check if needs to be synced
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
os.chdir(pr)
if os.path.isdir(cfg['subdir_ck_ext']):
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', cfg['subdir_ck_ext'])
rx=os.system(ss)
os.chdir(p1)
if os.path.isdir(cfg['subdir_ck_ext']):
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', cfg['subdir_ck_ext'])
rx=os.system(ss)
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', pdd)
rx=os.system(ss)
os.chdir(ppp)
# Prepare output
rr={'return':0,
'dict': a,
'info': info,
'updates': updates,
'path':p2,
'path_module': pm,
'path_repo': pr,
'repo_uoa':ruoa,
'repo_uid':ruid,
'repo_alias':ralias,
'module_uoa':muoa,
'module_uid':muid,
'module_alias':malias,
'data_uoa':pdd,
'data_uid':duid,
'data_alias':dalias,
'data_name':dn}
# Check if need to add index
if cfg.get('use_indexing','')=='yes':
muid=rr['module_uid']
duid=rr['data_uid']
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rr})
if ri['return']>0: return ri
# Remove lock after update if needed
if uuid!='':
pl=os.path.join(p2, cfg['subdir_ck_ext'], cfg['file_for_lock'])
if os.path.isfile(pl): os.remove(pl)
rr['return']=0
return rr |
def repr(self, changed_widgets=None):
"""Represents the widget as HTML format, packs all the attributes, children and so on.
Args:
client (App): Client instance.
changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated.
The Widget that has to be updated is the key, and the value is its textual repr.
"""
if changed_widgets is None:
changed_widgets={}
return super(Widget, self).repr(changed_widgets) | Represents the widget as HTML format, packs all the attributes, children and so on.
Args:
client (App): Client instance.
changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated.
The Widget that has to be updated is the key, and the value is its textual repr. | Below is the instruction that describes the task:
### Input:
Represents the widget as HTML format, packs all the attributes, children and so on.
Args:
client (App): Client instance.
changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated.
The Widget that has to be updated is the key, and the value is its textual repr.
### Response:
def repr(self, changed_widgets=None):
"""Represents the widget as HTML format, packs all the attributes, children and so on.
Args:
client (App): Client instance.
changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated.
The Widget that have to be updated is the key, and the value is its textual repr.
"""
if changed_widgets is None:
changed_widgets={}
return super(Widget, self).repr(changed_widgets) |
def _find_observable_paths(extra_files=None):
"""Finds all paths that should be observed."""
rv = set(
os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
for x in sys.path
)
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, "__file__", None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv) | Finds all paths that should be observed. | Below is the instruction that describes the task:
### Input:
Finds all paths that should be observed.
### Response:
def _find_observable_paths(extra_files=None):
"""Finds all paths that should be observed."""
rv = set(
os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
for x in sys.path
)
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, "__file__", None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv) |
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = 0
self.pos += 1
self.gotonext()
adlist = ""
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = 0
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = 1
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist | Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec. | Below is the instruction that describes the task:
### Input:
Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
### Response:
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = 0
self.pos += 1
self.gotonext()
adlist = ""
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = 0
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = 1
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist |
def p_subidentifier_defval(self, p):
"""subidentifier_defval : LOWERCASE_IDENTIFIER '(' NUMBER ')'
| NUMBER"""
n = len(p)
if n == 2:
p[0] = ('subidentifier_defval', p[1])
elif n == 5:
p[0] = ('subidentifier_defval', p[1], p[3]) | subidentifier_defval : LOWERCASE_IDENTIFIER '(' NUMBER ')'
| NUMBER | Below is the instruction that describes the task:
### Input:
subidentifier_defval : LOWERCASE_IDENTIFIER '(' NUMBER ')'
| NUMBER
### Response:
def p_subidentifier_defval(self, p):
"""subidentifier_defval : LOWERCASE_IDENTIFIER '(' NUMBER ')'
| NUMBER"""
n = len(p)
if n == 2:
p[0] = ('subidentifier_defval', p[1])
elif n == 5:
p[0] = ('subidentifier_defval', p[1], p[3]) |
def start_adc(self, channel, gain=1, data_rate=None):
"""Start continuous ADC conversions on the specified channel (0-3). Will
return an initial conversion result, then call the get_last_result()
function to read the most recent conversion result. Call stop_adc() to
stop conversions.
"""
assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
# Start continuous reads and set the mux value to the channel plus
# the highest bit (bit 3) set.
return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS) | Start continuous ADC conversions on the specified channel (0-3). Will
return an initial conversion result, then call the get_last_result()
function to read the most recent conversion result. Call stop_adc() to
stop conversions. | Below is the instruction that describes the task:
### Input:
Start continuous ADC conversions on the specified channel (0-3). Will
return an initial conversion result, then call the get_last_result()
function to read the most recent conversion result. Call stop_adc() to
stop conversions.
### Response:
def start_adc(self, channel, gain=1, data_rate=None):
"""Start continuous ADC conversions on the specified channel (0-3). Will
return an initial conversion result, then call the get_last_result()
function to read the most recent conversion result. Call stop_adc() to
stop conversions.
"""
assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
# Start continuous reads and set the mux value to the channel plus
# the highest bit (bit 3) set.
return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS) |
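A hedged usage sketch of the continuous-read flow the docstring describes. The package and constructor names are assumptions based on the Adafruit ADS1x15 driver; get_last_result() and stop_adc() are the companion calls named in the docstring above:

import time
import Adafruit_ADS1x15  # assumed package name

adc = Adafruit_ADS1x15.ADS1115()       # assumed constructor for the ADS1115 variant
adc.start_adc(0, gain=1)               # begin continuous conversions on channel 0
try:
    for _ in range(5):
        value = adc.get_last_result()  # most recent conversion result
        print(value)
        time.sleep(0.5)
finally:
    adc.stop_adc()                     # stop continuous conversions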
def start(self):
"""
Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \
:func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.)
"""
logging.info('ResponseBot started')
handler_classes = handler_utils.discover_handler_classes(self.config.get('handlers_package'))
if len(handler_classes) == 0:
logging.warning('No handler found. Did you forget to extend BaseTweethandler? Check --handlers-module')
while True:
try:
client = auth_utils.auth(self.config)
listener = ResponseBotListener(client=client, handler_classes=handler_classes)
stream = ResponseBotStream(client=client, listener=listener)
stream.start()
except (APIQuotaError, AuthenticationError, TweepError) as e:
self.handle_error(e)
else:
break | Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \
:func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.) | Below is the instruction that describes the task:
### Input:
Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \
:func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.)
### Response:
def start(self):
"""
Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \
:func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.)
"""
logging.info('ResponseBot started')
handler_classes = handler_utils.discover_handler_classes(self.config.get('handlers_package'))
if len(handler_classes) == 0:
logging.warning('No handler found. Did you forget to extend BaseTweethandler? Check --handlers-module')
while True:
try:
client = auth_utils.auth(self.config)
listener = ResponseBotListener(client=client, handler_classes=handler_classes)
stream = ResponseBotStream(client=client, listener=listener)
stream.start()
except (APIQuotaError, AuthenticationError, TweepError) as e:
self.handle_error(e)
else:
break |
def _detects_peaks(ecg_integrated, sample_rate):
"""
Detects peaks from local maximum
----------
Parameters
----------
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
chosen_peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
a R peak.
possible_peaks : list
List with all the local maximums in the signal.
"""
# Minimum RR interval = 200 ms
min_rr = (sample_rate / 1000) * 200
# Computes all possible peaks and their amplitudes
possible_peaks = [i for i in range(0, len(ecg_integrated)-1)
if ecg_integrated[i-1] < ecg_integrated[i] and
ecg_integrated[i] > ecg_integrated[i+1]]
possible_amplitudes = [ecg_integrated[k] for k in possible_peaks]
chosen_peaks = []
# Starts with first peak
if not possible_peaks:
raise Exception("No Peaks Detected.")
peak_candidate_i = possible_peaks[0]
peak_candidate_amp = possible_amplitudes[0]
for peak_i, peak_amp in zip(possible_peaks, possible_amplitudes):
if peak_i - peak_candidate_i <= min_rr and peak_amp > peak_candidate_amp:
peak_candidate_i = peak_i
peak_candidate_amp = peak_amp
elif peak_i - peak_candidate_i > min_rr:
chosen_peaks += [peak_candidate_i - 6] # Delay of 6 samples
peak_candidate_i = peak_i
peak_candidate_amp = peak_amp
else:
pass
return chosen_peaks, possible_peaks | Detects peaks from local maximum
----------
Parameters
----------
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
chosen_peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
a R peak.
possible_peaks : list
List with all the local maximums in the signal. | Below is the instruction that describes the task:
### Input:
Detects peaks from local maximum
----------
Parameters
----------
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
chosen_peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
a R peak.
possible_peaks : list
List with all the local maximums in the signal.
### Response:
def _detects_peaks(ecg_integrated, sample_rate):
"""
Detects peaks from local maximum
----------
Parameters
----------
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
chosen_peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
a R peak.
possible_peaks : list
List with all the local maximums in the signal.
"""
# Minimum RR interval = 200 ms
min_rr = (sample_rate / 1000) * 200
# Computes all possible peaks and their amplitudes
possible_peaks = [i for i in range(0, len(ecg_integrated)-1)
if ecg_integrated[i-1] < ecg_integrated[i] and
ecg_integrated[i] > ecg_integrated[i+1]]
possible_amplitudes = [ecg_integrated[k] for k in possible_peaks]
chosen_peaks = []
# Starts with first peak
if not possible_peaks:
raise Exception("No Peaks Detected.")
peak_candidate_i = possible_peaks[0]
peak_candidate_amp = possible_amplitudes[0]
for peak_i, peak_amp in zip(possible_peaks, possible_amplitudes):
if peak_i - peak_candidate_i <= min_rr and peak_amp > peak_candidate_amp:
peak_candidate_i = peak_i
peak_candidate_amp = peak_amp
elif peak_i - peak_candidate_i > min_rr:
chosen_peaks += [peak_candidate_i - 6] # Delay of 6 samples
peak_candidate_i = peak_i
peak_candidate_amp = peak_amp
else:
pass
return chosen_peaks, possible_peaks |
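A hedged usage sketch with a synthetic integrated signal (the sampling rate and waveform are made up for illustration; it assumes _detects_peaks as defined above is in scope):

import numpy as np

sample_rate = 1000  # Hz, assumed
t = np.arange(0, 3, 1 / sample_rate)
# Synthetic "integrated" signal: one sharp bump per second, mimicking R-wave energy.
ecg_integrated = np.exp(-((t % 1.0) - 0.5) ** 2 / 0.001)

chosen, candidates = _detects_peaks(ecg_integrated, sample_rate)
print(len(candidates), "local maxima found,", len(chosen), "kept after the 200 ms rule")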
def _recv_loop(self):
"""Service socket recv, returning responses to the correct queue"""
self._completed_response_lines = []
self._is_multiline = None
lines_iterator = self._get_lines()
while True:
try:
line = next(lines_iterator)
if self._is_multiline is None:
self._is_multiline = line.startswith("!") or line == "."
if line.startswith("ERR"):
self._respond(ValueError(line))
elif self._is_multiline:
if line == ".":
self._respond(self._completed_response_lines)
else:
assert line[0] == "!", \
"Multiline response {} doesn't start with !" \
.format(repr(line))
self._completed_response_lines.append(line[1:])
else:
self._respond(line)
except StopIteration:
return
except Exception:
log.exception("Exception receiving message")
raise | Service socket recv, returning responses to the correct queue | Below is the instruction that describes the task:
### Input:
Service socket recv, returning responses to the correct queue
### Response:
def _recv_loop(self):
"""Service socket recv, returning responses to the correct queue"""
self._completed_response_lines = []
self._is_multiline = None
lines_iterator = self._get_lines()
while True:
try:
line = next(lines_iterator)
if self._is_multiline is None:
self._is_multiline = line.startswith("!") or line == "."
if line.startswith("ERR"):
self._respond(ValueError(line))
elif self._is_multiline:
if line == ".":
self._respond(self._completed_response_lines)
else:
assert line[0] == "!", \
"Multiline response {} doesn't start with !" \
.format(repr(line))
self._completed_response_lines.append(line[1:])
else:
self._respond(line)
except StopIteration:
return
except Exception:
log.exception("Exception receiving message")
raise |
def recarray_to_hdf5_group(ra, parent, name, **kwargs):
"""Write each column in a recarray to a dataset in an HDF5 group.
Parameters
----------
ra : recarray
Numpy recarray to store.
parent : string or h5py group
Parent HDF5 file or group. If a string, will be treated as HDF5 file
name.
name : string
Name or path of group to write data into.
kwargs : keyword arguments
Passed through to h5py require_dataset() function.
Returns
-------
h5g : h5py group
"""
import h5py
h5f = None
if isinstance(parent, str):
h5f = h5py.File(parent, mode='a')
parent = h5f
try:
h5g = parent.require_group(name)
for n in ra.dtype.names:
array_to_hdf5(ra[n], h5g, n, **kwargs)
return h5g
finally:
if h5f is not None:
h5f.close() | Write each column in a recarray to a dataset in an HDF5 group.
Parameters
----------
ra : recarray
Numpy recarray to store.
parent : string or h5py group
Parent HDF5 file or group. If a string, will be treated as HDF5 file
name.
name : string
Name or path of group to write data into.
kwargs : keyword arguments
Passed through to h5py require_dataset() function.
Returns
-------
h5g : h5py group | Below is the instruction that describes the task:
### Input:
Write each column in a recarray to a dataset in an HDF5 group.
Parameters
----------
ra : recarray
Numpy recarray to store.
parent : string or h5py group
Parent HDF5 file or group. If a string, will be treated as HDF5 file
name.
name : string
Name or path of group to write data into.
kwargs : keyword arguments
Passed through to h5py require_dataset() function.
Returns
-------
h5g : h5py group
### Response:
def recarray_to_hdf5_group(ra, parent, name, **kwargs):
"""Write each column in a recarray to a dataset in an HDF5 group.
Parameters
----------
ra : recarray
Numpy recarray to store.
parent : string or h5py group
Parent HDF5 file or group. If a string, will be treated as HDF5 file
name.
name : string
Name or path of group to write data into.
kwargs : keyword arguments
Passed through to h5py require_dataset() function.
Returns
-------
h5g : h5py group
"""
import h5py
h5f = None
if isinstance(parent, str):
h5f = h5py.File(parent, mode='a')
parent = h5f
try:
h5g = parent.require_group(name)
for n in ra.dtype.names:
array_to_hdf5(ra[n], h5g, n, **kwargs)
return h5g
finally:
if h5f is not None:
h5f.close() |
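A small usage sketch (file name and column names are hypothetical; it assumes recarray_to_hdf5_group as defined above is in scope and h5py is installed):

import numpy as np

# Build a tiny recarray with two named columns.
ra = np.rec.fromarrays(
    [np.array([1, 2, 3]), np.array([0.1, 0.2, 0.3])],
    names=["pos", "score"],
)
# A string parent opens/creates the HDF5 file in append mode; extra keyword
# arguments such as chunks=True are forwarded to h5py's require_dataset().
h5g = recarray_to_hdf5_group(ra, "example.h5", "/variants", chunks=True)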
def processLedger(self) -> None:
"""
Checks ledger for planned but not yet performed upgrades
and schedules upgrade for the most recent one
Assumption: Only version is enough to identify a release, no hash
checking is done
:return:
"""
logger.debug(
'{} processing config ledger for any upgrades'.format(self))
last_pool_upgrade_txn_start = self.get_upgrade_txn(
lambda txn: get_type(txn) == POOL_UPGRADE and get_payload_data(txn)[ACTION] == START, reverse=True)
if last_pool_upgrade_txn_start:
logger.info('{} found upgrade START txn {}'.format(
self, last_pool_upgrade_txn_start))
last_pool_upgrade_txn_seq_no = get_seq_no(last_pool_upgrade_txn_start)
# searching for CANCEL for this upgrade submitted after START txn
last_pool_upgrade_txn_cancel = self.get_upgrade_txn(
lambda txn:
get_type(txn) == POOL_UPGRADE and get_payload_data(txn)[ACTION] == CANCEL and
get_payload_data(txn)[VERSION] == get_payload_data(last_pool_upgrade_txn_start)[VERSION],
start_no=last_pool_upgrade_txn_seq_no + 1)
if last_pool_upgrade_txn_cancel:
logger.info('{} found upgrade CANCEL txn {}'.format(
self, last_pool_upgrade_txn_cancel))
return
self.handleUpgradeTxn(last_pool_upgrade_txn_start) | Checks ledger for planned but not yet performed upgrades
and schedules upgrade for the most recent one
Assumption: Only version is enough to identify a release, no hash
checking is done
:return: | Below is the instruction that describes the task:
### Input:
Checks ledger for planned but not yet performed upgrades
and schedules upgrade for the most recent one
Assumption: Only version is enough to identify a release, no hash
checking is done
:return:
### Response:
def processLedger(self) -> None:
"""
Checks ledger for planned but not yet performed upgrades
and schedules upgrade for the most recent one
Assumption: Only version is enough to identify a release, no hash
checking is done
:return:
"""
logger.debug(
'{} processing config ledger for any upgrades'.format(self))
last_pool_upgrade_txn_start = self.get_upgrade_txn(
lambda txn: get_type(txn) == POOL_UPGRADE and get_payload_data(txn)[ACTION] == START, reverse=True)
if last_pool_upgrade_txn_start:
logger.info('{} found upgrade START txn {}'.format(
self, last_pool_upgrade_txn_start))
last_pool_upgrade_txn_seq_no = get_seq_no(last_pool_upgrade_txn_start)
# searching for CANCEL for this upgrade submitted after START txn
last_pool_upgrade_txn_cancel = self.get_upgrade_txn(
lambda txn:
get_type(txn) == POOL_UPGRADE and get_payload_data(txn)[ACTION] == CANCEL and
get_payload_data(txn)[VERSION] == get_payload_data(last_pool_upgrade_txn_start)[VERSION],
start_no=last_pool_upgrade_txn_seq_no + 1)
if last_pool_upgrade_txn_cancel:
logger.info('{} found upgrade CANCEL txn {}'.format(
self, last_pool_upgrade_txn_cancel))
return
self.handleUpgradeTxn(last_pool_upgrade_txn_start) |
def reload(self):
"""Reload source from disk and initialize state."""
# read data and parse into blocks
self.fload()
lines = self.fobj.readlines()
src_b = [l for l in lines if l.strip()]
nblocks = len(src_b)
self.src = ''.join(lines)
self._silent = [False]*nblocks
self._auto = [True]*nblocks
self.auto_all = True
self.nblocks = nblocks
self.src_blocks = src_b
# also build syntax-highlighted source
self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)
# ensure clean namespace and seek offset
self.reset() | Reload source from disk and initialize state. | Below is the instruction that describes the task:
### Input:
Reload source from disk and initialize state.
### Response:
def reload(self):
"""Reload source from disk and initialize state."""
# read data and parse into blocks
self.fload()
lines = self.fobj.readlines()
src_b = [l for l in lines if l.strip()]
nblocks = len(src_b)
self.src = ''.join(lines)
self._silent = [False]*nblocks
self._auto = [True]*nblocks
self.auto_all = True
self.nblocks = nblocks
self.src_blocks = src_b
# also build syntax-highlighted source
self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)
# ensure clean namespace and seek offset
self.reset() |
def matches(self, other, **kwargs):
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
(bool) True if the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
return m.fit(Structure.from_sites(self), Structure.from_sites(other)) | Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
(bool) True if the structures are similar under some affine
transformation. | Below is the instruction that describes the task:
### Input:
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
(bool) True if the structures are similar under some affine
transformation.
### Response:
def matches(self, other, **kwargs):
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
(bool) True if the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
return m.fit(Structure.from_sites(self), Structure.from_sites(other)) |
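A hedged usage sketch (assuming a reasonably recent pymatgen; the lattice constants are made up, and ltol, stol and angle_tol are the usual StructureMatcher tolerances the keyword arguments forward to):

from pymatgen.core import Lattice, Structure

# Two descriptions of the same rock-salt-like cell, one slightly rescaled.
s1 = Structure(Lattice.cubic(4.20), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s2 = Structure(Lattice.cubic(4.25), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])

print(s1.matches(s2, ltol=0.2, stol=0.3, angle_tol=5))  # expected: True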
def contours(
self, elevation, interval=100, field='elev', base=0
):
"""
Extract contour lines from elevation data.
Parameters
----------
elevation : array
input elevation data
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry
"""
return commons_contours.extract_contours(
elevation, self.tile, interval=interval, field=field, base=base) | Extract contour lines from elevation data.
Parameters
----------
elevation : array
input elevation data
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry | Below is the instruction that describes the task:
### Input:
Extract contour lines from elevation data.
Parameters
----------
elevation : array
input elevation data
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry
### Response:
def contours(
self, elevation, interval=100, field='elev', base=0
):
"""
Extract contour lines from elevation data.
Parameters
----------
elevation : array
input elevation data
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry
"""
return commons_contours.extract_contours(
elevation, self.tile, interval=interval, field=field, base=base) |
def shall_skip(module, opts):
"""Check if we want to skip this module."""
# skip it if there is nothing (or just \n or \r\n) in the file
if path.getsize(module) <= 2:
return True
# skip if it has a "private" name and this is selected
filename = path.basename(module)
if filename != '__init__.py' and filename.startswith('_') and \
not opts.includeprivate:
return True
return False | Check if we want to skip this module. | Below is the instruction that describes the task:
### Input:
Check if we want to skip this module.
### Response:
def shall_skip(module, opts):
"""Check if we want to skip this module."""
# skip it if there is nothing (or just \n or \r\n) in the file
if path.getsize(module) <= 2:
return True
# skip if it has a "private" name and this is selected
filename = path.basename(module)
if filename != '__init__.py' and filename.startswith('_') and \
not opts.includeprivate:
return True
return False |
def get_stats(self, container_id):
"""
:param container_id:
:return: an iterable that contains dictionaries with the stats of the running container. See the docker api for content.
"""
return self._docker.containers.get(container_id).stats(decode=True) | :param container_id:
:return: an iterable that contains dictionaries with the stats of the running container. See the docker api for content. | Below is the instruction that describes the task:
### Input:
:param container_id:
:return: an iterable that contains dictionaries with the stats of the running container. See the docker api for content.
### Response:
def get_stats(self, container_id):
"""
:param container_id:
:return: an iterable that contains dictionaries with the stats of the running container. See the docker api for content.
"""
return self._docker.containers.get(container_id).stats(decode=True) |
def optimize(self, n_particles=50, n_iterations=250, restart=1):
"""
the best result of all optimizations will be returned.
total number of lens models solved: n_particles*n_iterations
:param n_particles: number of particle swarm particles
:param n_iterations: number of particle swarm iterations
:param restart: number of times to execute the optimization;
:return: lens model keywords, [optimized source position], best fit image positions
"""
if restart < 0:
raise ValueError("parameter 'restart' must be integer of value > 0")
# particle swarm optimization
penalties, parameters, src_pen_best = [],[], []
for run in range(0, restart):
penalty, params = self._single_optimization(n_particles, n_iterations)
penalties.append(penalty)
parameters.append(params)
src_pen_best.append(self._optimizer.src_pen_best)
# select the best optimization
best_index = np.argmin(penalties)
# combine the optimized parameters with the parameters kept fixed during the optimization to obtain full kwargs_lens
kwargs_varied = self._params.argstovary_todictionary(parameters[best_index])
kwargs_lens_final = kwargs_varied + self._params.argsfixed_todictionary()
# solve for the optimized image positions
srcx, srcy = self._optimizer.lensing._ray_shooting_fast(kwargs_varied)
source_x, source_y = np.mean(srcx), np.mean(srcy)
# if we have a good enough solution, no point in recomputing the image positions since this can be quite slow
# and will give the same answer
if src_pen_best[best_index] < self._tol_src_penalty:
x_image, y_image = self.x_pos, self.y_pos
else:
# Here, the solver has the instance of "lensing_class" or "LensModel" for multiplane/singleplane respectively.
print('Warning: possibly a bad fit.')
x_image, y_image = self.solver.findBrightImage(source_x, source_y, kwargs_lens_final, arrival_time_sort=False)
#x_image, y_image = self.solver.image_position_from_source(source_x, source_y, kwargs_lens_final, arrival_time_sort = False)
if self._verbose:
print('optimization done.')
print('Recovered source position: ', (srcx, srcy))
return kwargs_lens_final, [source_x, source_y], [x_image, y_image] | the best result of all optimizations will be returned.
total number of lens models solved: n_particles*n_iterations
:param n_particles: number of particle swarm particles
:param n_iterations: number of particle swarm iterations
:param restart: number of times to execute the optimization;
:return: lens model keywords, [optimized source position], best fit image positions | Below is the instruction that describes the task:
### Input:
the best result of all optimizations will be returned.
total number of lens models solved: n_particles*n_iterations
:param n_particles: number of particle swarm particles
:param n_iterations: number of particle swarm iterations
:param restart: number of times to execute the optimization;
:return: lens model keywords, [optimized source position], best fit image positions
### Response:
def optimize(self, n_particles=50, n_iterations=250, restart=1):
"""
the best result of all optimizations will be returned.
total number of lens models solved: n_particles*n_iterations
:param n_particles: number of particle swarm particles
:param n_iterations: number of particle swarm iterations
:param restart: number of times to execute the optimization;
:return: lens model keywords, [optimized source position], best fit image positions
"""
        if restart < 1:
raise ValueError("parameter 'restart' must be integer of value > 0")
# particle swarm optimization
penalties, parameters, src_pen_best = [],[], []
for run in range(0, restart):
penalty, params = self._single_optimization(n_particles, n_iterations)
penalties.append(penalty)
parameters.append(params)
src_pen_best.append(self._optimizer.src_pen_best)
# select the best optimization
best_index = np.argmin(penalties)
# combine the optimized parameters with the parameters kept fixed during the optimization to obtain full kwargs_lens
kwargs_varied = self._params.argstovary_todictionary(parameters[best_index])
kwargs_lens_final = kwargs_varied + self._params.argsfixed_todictionary()
# solve for the optimized image positions
srcx, srcy = self._optimizer.lensing._ray_shooting_fast(kwargs_varied)
source_x, source_y = np.mean(srcx), np.mean(srcy)
# if we have a good enough solution, no point in recomputing the image positions since this can be quite slow
# and will give the same answer
if src_pen_best[best_index] < self._tol_src_penalty:
x_image, y_image = self.x_pos, self.y_pos
else:
# Here, the solver has the instance of "lensing_class" or "LensModel" for multiplane/singleplane respectively.
print('Warning: possibly a bad fit.')
x_image, y_image = self.solver.findBrightImage(source_x, source_y, kwargs_lens_final, arrival_time_sort=False)
#x_image, y_image = self.solver.image_position_from_source(source_x, source_y, kwargs_lens_final, arrival_time_sort = False)
if self._verbose:
print('optimization done.')
print('Recovered source position: ', (srcx, srcy))
return kwargs_lens_final, [source_x, source_y], [x_image, y_image] |
def flash_spi_attach(self, hspi_arg):
"""Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command.
"""
# last 3 bytes in ESP_SPI_ATTACH argument are reserved values
arg = struct.pack('<I', hspi_arg)
if not self.IS_STUB:
# ESP32 ROM loader takes additional 'is legacy' arg, which is not
# currently supported in the stub loader or esptool.py (as it's not usually needed.)
is_legacy = 0
arg += struct.pack('BBBB', is_legacy, 0, 0, 0)
self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg) | Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command. | Below is the the instruction that describes the task:
### Input:
Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command.
### Response:
def flash_spi_attach(self, hspi_arg):
"""Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command.
"""
# last 3 bytes in ESP_SPI_ATTACH argument are reserved values
arg = struct.pack('<I', hspi_arg)
if not self.IS_STUB:
# ESP32 ROM loader takes additional 'is legacy' arg, which is not
# currently supported in the stub loader or esptool.py (as it's not usually needed.)
is_legacy = 0
arg += struct.pack('BBBB', is_legacy, 0, 0, 0)
self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg) |
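
A minimal sketch of the byte packing done by flash_spi_attach above, outside of esptool; the pin-configuration word and the ROM-loader branch are assumptions for illustration, and only the standard-library struct module is used.

import struct

hspi_arg = 0                                  # hypothetical pin-configuration word
arg = struct.pack('<I', hspi_arg)             # 32-bit little-endian argument
is_stub = False                               # assume the ROM loader, not the stub
if not is_stub:
    arg += struct.pack('BBBB', 0, 0, 0, 0)    # 'is legacy' flag plus reserved bytes
print(arg.hex())                              # -> 0000000000000000
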
def unlock_input_target_config_target_candidate_candidate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
unlock = ET.Element("unlock")
config = unlock
input = ET.SubElement(unlock, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
candidate = ET.SubElement(config_target, "candidate")
candidate = ET.SubElement(candidate, "candidate")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def unlock_input_target_config_target_candidate_candidate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
unlock = ET.Element("unlock")
config = unlock
input = ET.SubElement(unlock, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
candidate = ET.SubElement(config_target, "candidate")
candidate = ET.SubElement(candidate, "candidate")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
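
Since the method above only assembles an ElementTree payload and hands it to a callback, a standalone sketch can show the XML it builds; the element names are taken from the code, and serializing with ET.tostring is only for inspection.

import xml.etree.ElementTree as ET

unlock = ET.Element("unlock")
input_el = ET.SubElement(unlock, "input")
target = ET.SubElement(input_el, "target")
config_target = ET.SubElement(target, "config-target")
candidate = ET.SubElement(config_target, "candidate")
ET.SubElement(candidate, "candidate")
print(ET.tostring(unlock).decode())
# -> <unlock><input><target><config-target><candidate><candidate /></candidate></config-target></target></input></unlock>
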
def _process_mrk_acc_view(self):
"""
Use this table to create the idmap between the internal marker id and
the public mgiid.
No triples are produced in this process
:return:
"""
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_counter = 0
LOG.info("mapping markers to internal identifiers")
raw = '/'.join((self.rawdir, 'mrk_acc_view'))
col = [
'accid', 'prefix_part', 'logicaldb_key', 'object_key', 'preferred',
'organism_key']
with open(raw, 'r') as fh:
fh.readline() # read the header row; skip
for line in fh:
line = line.rstrip('\n')
line_counter += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefix_part')]
logicaldb_key = row[col.index('logicaldb_key')]
object_key = row[col.index('object_key')]
preferred = row[col.index('preferred')]
# organism_key)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
self.idhash['marker'][object_key] = accid
return | Use this table to create the idmap between the internal marker id and
the public mgiid.
No triples are produced in this process
:return: | Below is the the instruction that describes the task:
### Input:
Use this table to create the idmap between the internal marker id and
the public mgiid.
No triples are produced in this process
:return:
### Response:
def _process_mrk_acc_view(self):
"""
Use this table to create the idmap between the internal marker id and
the public mgiid.
No triples are produced in this process
:return:
"""
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_counter = 0
LOG.info("mapping markers to internal identifiers")
raw = '/'.join((self.rawdir, 'mrk_acc_view'))
col = [
'accid', 'prefix_part', 'logicaldb_key', 'object_key', 'preferred',
'organism_key']
with open(raw, 'r') as fh:
fh.readline() # read the header row; skip
for line in fh:
line = line.rstrip('\n')
line_counter += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefix_part')]
logicaldb_key = row[col.index('logicaldb_key')]
object_key = row[col.index('object_key')]
preferred = row[col.index('preferred')]
# organism_key)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
self.idhash['marker'][object_key] = accid
return |
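
A small sketch of the column-index lookup used above, applied to a single made-up mrk_acc_view row; the accession and object key are invented for illustration.

col = ['accid', 'prefix_part', 'logicaldb_key', 'object_key', 'preferred', 'organism_key']
line = "MGI:96677\tMGI:\t1\t12345\t1\t1"           # hypothetical row
row = line.rstrip('\n').split('\t')
idhash = {'marker': {}}
if row[col.index('logicaldb_key')] == '1' \
        and row[col.index('prefix_part')] == 'MGI:' \
        and row[col.index('preferred')] == '1':
    idhash['marker'][row[col.index('object_key')]] = row[col.index('accid')]
print(idhash)                                      # -> {'marker': {'12345': 'MGI:96677'}}
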
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata(optional). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self | Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata(optional). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType | Below is the the instruction that describes the task:
### Input:
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata(optional). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
### Response:
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata(optional). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self |
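
A short usage sketch of the add method above (assumes a local PySpark installation); it mirrors the doctests and also exercises the string form of a data type.

from pyspark.sql.types import StructType, StructField, StringType, IntegerType

schema = (StructType()
          .add("name", StringType(), True)
          .add(StructField("age", IntegerType(), False))
          .add("score", "double"))                 # string type names are parsed
print([f.name for f in schema.fields])             # -> ['name', 'age', 'score']
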
def dasrfr(handle, lenout=_default_len_out):
"""
Return the contents of the file record of a specified DAS file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasrfr_c.html
:param handle: DAS file handle.
:type handle: int
    :param lenout: length of output strings
    :type lenout: int
:return: ID word, DAS internal file name, Number of reserved records in file, \
Number of characters in use in reserved rec. area, Number of comment records in file, \
Number of characters in use in comment area.
:rtype: tuple
"""
handle = ctypes.c_int(handle)
idwlen = ctypes.c_int(lenout) # intentional
ifnlen = ctypes.c_int(lenout) # intentional
idword = stypes.stringToCharP(lenout)
ifname = stypes.stringToCharP(lenout)
nresvr = ctypes.c_int(0)
nresvc = ctypes.c_int(0)
ncomr = ctypes.c_int(0)
ncomc = ctypes.c_int(0)
libspice.dasrfr_c(handle, idwlen, ifnlen, idword, ifname,
ctypes.byref(nresvr), ctypes.byref(nresvc),
ctypes.byref(ncomr), ctypes.byref(ncomc))
return stypes.toPythonString(idword), stypes.toPythonString(ifname), nresvr.value, nresvc.value, ncomr.value, ncomc.value | Return the contents of the file record of a specified DAS file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasrfr_c.html
:param handle: DAS file handle.
:type handle: int
    :param lenout: length of output strings
    :type lenout: int
:return: ID word, DAS internal file name, Number of reserved records in file, \
Number of characters in use in reserved rec. area, Number of comment records in file, \
Number of characters in use in comment area.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Return the contents of the file record of a specified DAS file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasrfr_c.html
:param handle: DAS file handle.
:type handle: int
    :param lenout: length of output strings
    :type lenout: int
:return: ID word, DAS internal file name, Number of reserved records in file, \
Number of characters in use in reserved rec. area, Number of comment records in file, \
Number of characters in use in comment area.
:rtype: tuple
### Response:
def dasrfr(handle, lenout=_default_len_out):
"""
Return the contents of the file record of a specified DAS file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasrfr_c.html
:param handle: DAS file handle.
:type handle: int
    :param lenout: length of output strings
    :type lenout: int
:return: ID word, DAS internal file name, Number of reserved records in file, \
Number of characters in use in reserved rec. area, Number of comment records in file, \
Number of characters in use in comment area.
:rtype: tuple
"""
handle = ctypes.c_int(handle)
idwlen = ctypes.c_int(lenout) # intentional
ifnlen = ctypes.c_int(lenout) # intentional
idword = stypes.stringToCharP(lenout)
ifname = stypes.stringToCharP(lenout)
nresvr = ctypes.c_int(0)
nresvc = ctypes.c_int(0)
ncomr = ctypes.c_int(0)
ncomc = ctypes.c_int(0)
libspice.dasrfr_c(handle, idwlen, ifnlen, idword, ifname,
ctypes.byref(nresvr), ctypes.byref(nresvc),
ctypes.byref(ncomr), ctypes.byref(ncomc))
return stypes.toPythonString(idword), stypes.toPythonString(ifname), nresvr.value, nresvc.value, ncomr.value, ncomc.value |
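
A hedged usage sketch for dasrfr; it assumes SpiceyPy is installed and that a DAS/DSK file is available locally, and the file name here is hypothetical.

import spiceypy as spice

handle = spice.dasopr("phobos512.bds")             # open a DAS file read-only
idword, ifname, nresvr, nresvc, ncomr, ncomc = spice.dasrfr(handle)
print(idword, ncomr)                               # e.g. the ID word and comment-record count
spice.dascls(handle)                               # close the handle again
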
def save(filename_audio, filename_jam, jam, strict=True, fmt='auto', **kwargs):
'''Save a muda jam to disk
Parameters
----------
filename_audio: str
The path to store the audio file
filename_jam: str
The path to store the jams object
strict: bool
Strict safety checking for jams output
fmt : str
Output format parameter for `jams.JAMS.save`
kwargs
Additional parameters to `soundfile.write`
'''
y = jam.sandbox.muda._audio['y']
sr = jam.sandbox.muda._audio['sr']
# First, dump the audio file
psf.write(filename_audio, y, sr, **kwargs)
# Then dump the jam
jam.save(filename_jam, strict=strict, fmt=fmt) | Save a muda jam to disk
Parameters
----------
filename_audio: str
The path to store the audio file
filename_jam: str
The path to store the jams object
strict: bool
Strict safety checking for jams output
fmt : str
Output format parameter for `jams.JAMS.save`
kwargs
Additional parameters to `soundfile.write` | Below is the the instruction that describes the task:
### Input:
Save a muda jam to disk
Parameters
----------
filename_audio: str
The path to store the audio file
filename_jam: str
The path to store the jams object
strict: bool
Strict safety checking for jams output
fmt : str
Output format parameter for `jams.JAMS.save`
kwargs
Additional parameters to `soundfile.write`
### Response:
def save(filename_audio, filename_jam, jam, strict=True, fmt='auto', **kwargs):
'''Save a muda jam to disk
Parameters
----------
filename_audio: str
The path to store the audio file
filename_jam: str
The path to store the jams object
strict: bool
Strict safety checking for jams output
fmt : str
Output format parameter for `jams.JAMS.save`
kwargs
Additional parameters to `soundfile.write`
'''
y = jam.sandbox.muda._audio['y']
sr = jam.sandbox.muda._audio['sr']
# First, dump the audio file
psf.write(filename_audio, y, sr, **kwargs)
# Then dump the jam
jam.save(filename_jam, strict=strict, fmt=fmt) |
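
A hedged usage sketch for save; it assumes the muda package and its dependencies are installed, the file names are hypothetical, and muda.load_jam_audio is used to produce a jam with the audio attached, which is what save expects in jam.sandbox.muda.

import muda

jam = muda.load_jam_audio("example.jams", "example.ogg")    # hypothetical input files
muda.save("example_out.ogg", "example_out.jams", jam)
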
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds) | select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3 | Below is the the instruction that describes the task:
### Input:
select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
### Response:
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds) |
def get_code(dag_id):
"""Return python code of a given dag_id."""
session = settings.Session()
DM = models.DagModel
dag = session.query(DM).filter(DM.dag_id == dag_id).first()
session.close()
# Check DAG exists.
if dag is None:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
try:
with wwwutils.open_maybe_zipped(dag.fileloc, 'r') as f:
code = f.read()
return code
except IOError as e:
error_message = "Error {} while reading Dag id {} Code".format(str(e), dag_id)
raise AirflowException(error_message) | Return python code of a given dag_id. | Below is the the instruction that describes the task:
### Input:
Return python code of a given dag_id.
### Response:
def get_code(dag_id):
"""Return python code of a given dag_id."""
session = settings.Session()
DM = models.DagModel
dag = session.query(DM).filter(DM.dag_id == dag_id).first()
session.close()
# Check DAG exists.
if dag is None:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
try:
with wwwutils.open_maybe_zipped(dag.fileloc, 'r') as f:
code = f.read()
return code
except IOError as e:
error_message = "Error {} while reading Dag id {} Code".format(str(e), dag_id)
raise AirflowException(error_message) |
def send_to_output(master_dict, mash_output, sample_id, assembly_file):
"""Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
        The name of the sample being parsed to the .report.json file
Returns
-------
"""
plot_dict = {}
# create a new file only if master_dict is populated
if master_dict:
out_file = open("{}.json".format(
"".join(mash_output.split(".")[0])), "w")
out_file.write(json.dumps(master_dict))
out_file.close()
# iterate through master_dict in order to make contigs the keys
for k,v in master_dict.items():
if not v[2] in plot_dict:
plot_dict[v[2]] = [k]
else:
plot_dict[v[2]].append(k)
number_hits = len(master_dict)
else:
number_hits = 0
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [{
"header": "Mash Dist",
"table": "plasmids",
"patlas_mashdist": master_dict,
"value": number_hits
}]
}],
"plotData": [{
"sample": sample_id,
"data": {
"patlasMashDistXrange": plot_dict
},
"assemblyFile": assembly_file
}]
}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":"))) | Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
        The name of the sample being parsed to the .report.json file
Returns
------- | Below is the the instruction that describes the task:
### Input:
Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
        The name of the sample being parsed to the .report.json file
Returns
-------
### Response:
def send_to_output(master_dict, mash_output, sample_id, assembly_file):
"""Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
        The name of the sample being parsed to the .report.json file
Returns
-------
"""
plot_dict = {}
# create a new file only if master_dict is populated
if master_dict:
out_file = open("{}.json".format(
"".join(mash_output.split(".")[0])), "w")
out_file.write(json.dumps(master_dict))
out_file.close()
# iterate through master_dict in order to make contigs the keys
for k,v in master_dict.items():
if not v[2] in plot_dict:
plot_dict[v[2]] = [k]
else:
plot_dict[v[2]].append(k)
number_hits = len(master_dict)
else:
number_hits = 0
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [{
"header": "Mash Dist",
"table": "plasmids",
"patlas_mashdist": master_dict,
"value": number_hits
}]
}],
"plotData": [{
"sample": sample_id,
"data": {
"patlasMashDistXrange": plot_dict
},
"assemblyFile": assembly_file
}]
}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":"))) |
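
A small sketch of how the accession-to-contig mapping above is inverted into plot_dict, using a made-up master_dict; the value layout (contig name at index 2) is inferred from the loop in the code.

import json

master_dict = {"ACC_1": [0.9, 20, "contig_1"], "ACC_2": [0.8, 15, "contig_1"]}   # invented
plot_dict = {}
for k, v in master_dict.items():
    plot_dict.setdefault(v[2], []).append(k)
print(json.dumps(plot_dict))                       # -> {"contig_1": ["ACC_1", "ACC_2"]}
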
def to_dict(self):
"""
Convert this FunctionDoc to a dictionary. In addition to `CommentDoc`
keys, this adds:
- **name**: The function name
- **params**: A list of parameter dictionaries
- **options**: A list of option dictionaries
- **exceptions**: A list of exception dictionaries
- **return_val**: A dictionary describing the return type, as per `ParamDoc`
- **is_private**: True if private
- **is_constructor**: True if a constructor
- **member**: The raw text of the member property.
"""
vars = super(FunctionDoc, self).to_dict()
vars.update({
'name': self.name,
'params': [param.to_dict() for param in self.params],
'options': [option.to_dict() for option in self.options],
'exceptions': [exc.to_dict() for exc in self.exceptions],
'return_val': self.return_val.to_dict(),
'is_private': self.is_private,
'is_constructor': self.is_constructor,
'member': self.member
})
return vars | Convert this FunctionDoc to a dictionary. In addition to `CommentDoc`
keys, this adds:
- **name**: The function name
- **params**: A list of parameter dictionaries
- **options**: A list of option dictionaries
- **exceptions**: A list of exception dictionaries
- **return_val**: A dictionary describing the return type, as per `ParamDoc`
- **is_private**: True if private
- **is_constructor**: True if a constructor
- **member**: The raw text of the member property. | Below is the the instruction that describes the task:
### Input:
Convert this FunctionDoc to a dictionary. In addition to `CommentDoc`
keys, this adds:
- **name**: The function name
- **params**: A list of parameter dictionaries
- **options**: A list of option dictionaries
- **exceptions**: A list of exception dictionaries
- **return_val**: A dictionary describing the return type, as per `ParamDoc`
- **is_private**: True if private
- **is_constructor**: True if a constructor
- **member**: The raw text of the member property.
### Response:
def to_dict(self):
"""
Convert this FunctionDoc to a dictionary. In addition to `CommentDoc`
keys, this adds:
- **name**: The function name
- **params**: A list of parameter dictionaries
- **options**: A list of option dictionaries
- **exceptions**: A list of exception dictionaries
- **return_val**: A dictionary describing the return type, as per `ParamDoc`
- **is_private**: True if private
- **is_constructor**: True if a constructor
- **member**: The raw text of the member property.
"""
vars = super(FunctionDoc, self).to_dict()
vars.update({
'name': self.name,
'params': [param.to_dict() for param in self.params],
'options': [option.to_dict() for option in self.options],
'exceptions': [exc.to_dict() for exc in self.exceptions],
'return_val': self.return_val.to_dict(),
'is_private': self.is_private,
'is_constructor': self.is_constructor,
'member': self.member
})
return vars |
def print_dot(docgraph):
"""
converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic
"""
stripped_graph = preprocess_for_pydot(docgraph)
return nx.drawing.nx_pydot.to_pydot(stripped_graph).to_string() | converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic | Below is the the instruction that describes the task:
### Input:
converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic
### Response:
def print_dot(docgraph):
"""
converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic
"""
stripped_graph = preprocess_for_pydot(docgraph)
return nx.drawing.nx_pydot.to_pydot(stripped_graph).to_string() |
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then it's a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs | Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed. | Below is the the instruction that describes the task:
### Input:
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
### Response:
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then it's a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs |
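
A simplified stand-in for the argument splitting above, without Salt: positional arguments are plain values, keyword arguments arrive either as dicts flagged with __kwarg__ or as 'key=value' strings; the string parsing below is a rough substitute for salt.utils.args.parse_input, not the real implementation.

args = ["web1", {"__kwarg__": True, "timeout": 30}, "retries=2"]
_args, _kwargs = [], {}
for arg in args:
    if isinstance(arg, dict) and arg.pop("__kwarg__", False) is True:
        _kwargs.update(arg)                        # dict form marked as a kwarg
    elif isinstance(arg, str) and "=" in arg and not arg.startswith("="):
        key, val = arg.split("=", 1)               # naive 'key=value' parsing
        _kwargs[key] = val
    else:
        _args.append(arg)                          # everything else stays positional
print(_args, _kwargs)                              # -> ['web1'] {'timeout': 30, 'retries': '2'}
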
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time) | Set content into the cache | Below is the the instruction that describes the task:
### Input:
Set content into the cache
### Response:
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time) |
def save_as(self):
"""Save *as* the currently edited file"""
editorstack = self.get_current_editorstack()
if editorstack.save_as():
fname = editorstack.get_current_filename()
self.__add_recent_file(fname) | Save *as* the currently edited file | Below is the the instruction that describes the task:
### Input:
Save *as* the currently edited file
### Response:
def save_as(self):
"""Save *as* the currently edited file"""
editorstack = self.get_current_editorstack()
if editorstack.save_as():
fname = editorstack.get_current_filename()
self.__add_recent_file(fname) |
def strip_html(string, keep_tag_content=False):
"""
    Remove html code contained in the given string.
:param string: String to manipulate.
:type string: str
:param keep_tag_content: True to preserve tag content, False to remove tag and its content too (default).
:type keep_tag_content: bool
:return: String with html removed.
:rtype: str
"""
r = HTML_TAG_ONLY_RE if keep_tag_content else HTML_RE
    return r.sub('', string) | Remove html code contained in the given string.
:param string: String to manipulate.
:type string: str
:param keep_tag_content: True to preserve tag content, False to remove tag and its content too (default).
:type keep_tag_content: bool
:return: String with html removed.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Remove html code contained in the given string.
:param string: String to manipulate.
:type string: str
:param keep_tag_content: True to preserve tag content, False to remove tag and its content too (default).
:type keep_tag_content: bool
:return: String with html removed.
:rtype: str
### Response:
def strip_html(string, keep_tag_content=False):
"""
    Remove html code contained in the given string.
:param string: String to manipulate.
:type string: str
:param keep_tag_content: True to preserve tag content, False to remove tag and its content too (default).
:type keep_tag_content: bool
:return: String with html removed.
:rtype: str
"""
r = HTML_TAG_ONLY_RE if keep_tag_content else HTML_RE
return r.sub('', string) |
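
A self-contained sketch of the two behaviours above; the patterns here are rough approximations written for the example, not the library's real HTML_RE / HTML_TAG_ONLY_RE constants.

import re

HTML_TAG_ONLY_RE = re.compile(r'</?[a-zA-Z][^>]*>')                                   # tags only
HTML_RE = re.compile(r'<([a-zA-Z][^>]*)>.*?</\1\s*>|</?[a-zA-Z][^>]*/?>', re.DOTALL)  # tags plus content

s = '<p>hello <b>world</b></p>'
print(HTML_TAG_ONLY_RE.sub('', s))                 # -> hello world   (keep_tag_content=True)
print(HTML_RE.sub('', s))                          # -> empty string  (keep_tag_content=False)
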
def _refresh(self):
"""Refreshes the cursor with more data from Mongo.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query.
"""
if len(self.__data) or self.__killed:
return len(self.__data)
if not self.__session:
self.__session = self.__collection.database.client._ensure_session()
if self.__id is None: # Query
if (self.__min or self.__max) and not self.__hint:
warnings.warn("using a min/max query operator without "
"specifying a Cursor.hint is deprecated. A "
"hint will be required when using min/max in "
"PyMongo 4.0",
DeprecationWarning, stacklevel=3)
q = self._query_class(self.__query_flags,
self.__collection.database.name,
self.__collection.name,
self.__skip,
self.__query_spec(),
self.__projection,
self.__codec_options,
self._read_preference(),
self.__limit,
self.__batch_size,
self.__read_concern,
self.__collation,
self.__session,
self.__collection.database.client)
self.__send_message(q)
elif self.__id: # Get More
if self.__limit:
limit = self.__limit - self.__retrieved
if self.__batch_size:
limit = min(limit, self.__batch_size)
else:
limit = self.__batch_size
# Exhaust cursors don't send getMore messages.
g = self._getmore_class(self.__collection.database.name,
self.__collection.name,
limit,
self.__id,
self.__codec_options,
self._read_preference(),
self.__session,
self.__collection.database.client,
self.__max_await_time_ms,
self.__exhaust_mgr)
self.__send_message(g)
return len(self.__data) | Refreshes the cursor with more data from Mongo.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query. | Below is the the instruction that describes the task:
### Input:
Refreshes the cursor with more data from Mongo.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query.
### Response:
def _refresh(self):
"""Refreshes the cursor with more data from Mongo.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query.
"""
if len(self.__data) or self.__killed:
return len(self.__data)
if not self.__session:
self.__session = self.__collection.database.client._ensure_session()
if self.__id is None: # Query
if (self.__min or self.__max) and not self.__hint:
warnings.warn("using a min/max query operator without "
"specifying a Cursor.hint is deprecated. A "
"hint will be required when using min/max in "
"PyMongo 4.0",
DeprecationWarning, stacklevel=3)
q = self._query_class(self.__query_flags,
self.__collection.database.name,
self.__collection.name,
self.__skip,
self.__query_spec(),
self.__projection,
self.__codec_options,
self._read_preference(),
self.__limit,
self.__batch_size,
self.__read_concern,
self.__collation,
self.__session,
self.__collection.database.client)
self.__send_message(q)
elif self.__id: # Get More
if self.__limit:
limit = self.__limit - self.__retrieved
if self.__batch_size:
limit = min(limit, self.__batch_size)
else:
limit = self.__batch_size
# Exhaust cursors don't send getMore messages.
g = self._getmore_class(self.__collection.database.name,
self.__collection.name,
limit,
self.__id,
self.__codec_options,
self._read_preference(),
self.__session,
self.__collection.database.client,
self.__max_await_time_ms,
self.__exhaust_mgr)
self.__send_message(g)
return len(self.__data) |
def mark(self, channel_name, ts):
""" https://api.slack.com/methods/channels.mark
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'ts': ts,
})
return FromUrl('https://slack.com/api/channels.mark', self._requests)(data=self.params).post() | https://api.slack.com/methods/channels.mark | Below is the the instruction that describes the task:
### Input:
https://api.slack.com/methods/channels.mark
### Response:
def mark(self, channel_name, ts):
""" https://api.slack.com/methods/channels.mark
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'ts': ts,
})
return FromUrl('https://slack.com/api/channels.mark', self._requests)(data=self.params).post() |
def mouse_press_event(self, event):
"""
Forward mouse press events to the example
"""
# Support left and right mouse button for now
if event.button() not in [1, 2]:
return
self.example.mouse_press_event(event.x(), event.y(), event.button()) | Forward mouse press events to the example | Below is the the instruction that describes the task:
### Input:
Forward mouse press events to the example
### Response:
def mouse_press_event(self, event):
"""
Forward mouse press events to the example
"""
# Support left and right mouse button for now
if event.button() not in [1, 2]:
return
self.example.mouse_press_event(event.x(), event.y(), event.button()) |
def list_snapshots_for_a_minute(path, cam_id, day, hourm):
"""Returns a list of screenshots"""
screenshoots_path = path+"/"+str(cam_id)+"/"+day+"/"+hourm
if os.path.exists(screenshoots_path):
screenshots = [scr for scr in sorted(os.listdir(screenshoots_path))]
return screenshots
else:
return [] | Returns a list of screenshots | Below is the the instruction that describes the task:
### Input:
Returns a list of screenshots
### Response:
def list_snapshots_for_a_minute(path, cam_id, day, hourm):
"""Returns a list of screenshots"""
screenshoots_path = path+"/"+str(cam_id)+"/"+day+"/"+hourm
if os.path.exists(screenshoots_path):
screenshots = [scr for scr in sorted(os.listdir(screenshoots_path))]
return screenshots
else:
return [] |
def _get_vrfs(self):
"""Get the current VRFs configured in the device.
:return: A list of vrf names as string
"""
vrfs = []
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
vrfs_raw = parse.find_lines("^vrf definition")
for line in vrfs_raw:
# raw format ['ip vrf <vrf-name>',....]
vrf_name = line.strip().split(' ')[2]
vrfs.append(vrf_name)
LOG.info("VRFs:%s", vrfs)
return vrfs | Get the current VRFs configured in the device.
:return: A list of vrf names as string | Below is the the instruction that describes the task:
### Input:
Get the current VRFs configured in the device.
:return: A list of vrf names as string
### Response:
def _get_vrfs(self):
"""Get the current VRFs configured in the device.
:return: A list of vrf names as string
"""
vrfs = []
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
vrfs_raw = parse.find_lines("^vrf definition")
for line in vrfs_raw:
# raw format ['ip vrf <vrf-name>',....]
vrf_name = line.strip().split(' ')[2]
vrfs.append(vrf_name)
LOG.info("VRFs:%s", vrfs)
return vrfs |
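
A tiny sketch of the parsing step above, with a made-up list standing in for HTParser.find_lines("^vrf definition") on a running config.

vrfs_raw = ["vrf definition CUSTOMER-A", "vrf definition MGMT"]   # hypothetical matches
vrfs = [line.strip().split(' ')[2] for line in vrfs_raw]
print(vrfs)                                        # -> ['CUSTOMER-A', 'MGMT']
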
def getVersion(self, agent, word):
"""
=> version string /None
"""
version_markers = self.version_markers if \
isinstance(self.version_markers[0], (list, tuple)) else [self.version_markers]
version_part = agent.split(word, 1)[-1]
for start, end in version_markers:
if version_part.startswith(start) and end in version_part:
version = version_part[1:]
if end: # end could be empty string
version = version.split(end)[0]
if not self.allow_space_in_version:
version = version.split()[0]
return version | => version string /None | Below is the the instruction that describes the task:
### Input:
=> version string /None
### Response:
def getVersion(self, agent, word):
"""
=> version string /None
"""
version_markers = self.version_markers if \
isinstance(self.version_markers[0], (list, tuple)) else [self.version_markers]
version_part = agent.split(word, 1)[-1]
for start, end in version_markers:
if version_part.startswith(start) and end in version_part:
version = version_part[1:]
if end: # end could be empty string
version = version.split(end)[0]
if not self.allow_space_in_version:
version = version.split()[0]
return version |
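
A standalone sketch of the marker-based extraction above; the user-agent string and the ('/', ' ') start/end marker pair are assumptions for illustration.

agent = "Mozilla/5.0 Firefox/61.0 Safari/537.36"
word, (start, end) = "Firefox", ("/", " ")
version_part = agent.split(word, 1)[-1]            # -> '/61.0 Safari/537.36'
if version_part.startswith(start) and end in version_part:
    print(version_part[1:].split(end)[0])          # -> 61.0
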
def _extract_jump_targets(stmt):
"""
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
"""
targets = [ ]
# FIXME: We are assuming all jump targets are concrete targets. They may not be.
if isinstance(stmt, ailment.Stmt.Jump):
targets.append(stmt.target.value)
elif isinstance(stmt, ailment.Stmt.ConditionalJump):
targets.append(stmt.true_target.value)
targets.append(stmt.false_target.value)
return targets | Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
### Response:
def _extract_jump_targets(stmt):
"""
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
"""
targets = [ ]
# FIXME: We are assuming all jump targets are concrete targets. They may not be.
if isinstance(stmt, ailment.Stmt.Jump):
targets.append(stmt.target.value)
elif isinstance(stmt, ailment.Stmt.ConditionalJump):
targets.append(stmt.true_target.value)
targets.append(stmt.false_target.value)
return targets |
def p_localparamdecl_integer(self, p):
'localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON'
paramlist = [Localparam(rname, rvalue, lineno=p.lineno(3))
for rname, rvalue in p[3]]
p[0] = Decl(tuple(paramlist), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON | Below is the the instruction that describes the task:
### Input:
localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON
### Response:
def p_localparamdecl_integer(self, p):
'localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON'
paramlist = [Localparam(rname, rvalue, lineno=p.lineno(3))
for rname, rvalue in p[3]]
p[0] = Decl(tuple(paramlist), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def init_xena(api, logger, owner, ip=None, port=57911):
""" Create XenaManager object.
:param api: cli/rest
:param logger: python logger
:param owner: owner of the scripting session
:param ip: rest server IP
:param port: rest server TCP port
:return: Xena object
:rtype: XenaApp
"""
if api == ApiType.socket:
api_wrapper = XenaCliWrapper(logger)
elif api == ApiType.rest:
api_wrapper = XenaRestWrapper(logger, ip, port)
return XenaApp(logger, owner, api_wrapper) | Create XenaManager object.
:param api: cli/rest
:param logger: python logger
:param owner: owner of the scripting session
:param ip: rest server IP
:param port: rest server TCP port
:return: Xena object
:rtype: XenaApp | Below is the the instruction that describes the task:
### Input:
Create XenaManager object.
:param api: cli/rest
:param logger: python logger
:param owner: owner of the scripting session
:param ip: rest server IP
:param port: rest server TCP port
:return: Xena object
:rtype: XenaApp
### Response:
def init_xena(api, logger, owner, ip=None, port=57911):
""" Create XenaManager object.
:param api: cli/rest
:param logger: python logger
:param owner: owner of the scripting session
:param ip: rest server IP
:param port: rest server TCP port
:return: Xena object
:rtype: XenaApp
"""
if api == ApiType.socket:
api_wrapper = XenaCliWrapper(logger)
elif api == ApiType.rest:
api_wrapper = XenaRestWrapper(logger, ip, port)
return XenaApp(logger, owner, api_wrapper) |
def push_uci(self, uci: str) -> Move:
"""
Parses a move in UCI notation and puts it on the move stack.
Returns the move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move).
"""
move = self.parse_uci(uci)
self.push(move)
return move | Parses a move in UCI notation and puts it on the move stack.
Returns the move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move). | Below is the the instruction that describes the task:
### Input:
Parses a move in UCI notation and puts it on the move stack.
Returns the move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move).
### Response:
def push_uci(self, uci: str) -> Move:
"""
Parses a move in UCI notation and puts it on the move stack.
Returns the move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move).
"""
move = self.parse_uci(uci)
self.push(move)
return move |
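
A short usage sketch for push_uci, assuming the python-chess package.

import chess

board = chess.Board()
move = board.push_uci("e2e4")                      # parsed, validated and pushed
print(move, board.turn == chess.BLACK)             # -> e2e4 True
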
def _filter_fields(self, filter_function):
"""
Utility to iterate through all fields (super types first) of a type.
        :param filter_function: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted.
"""
fields = []
if self.parent_type:
fields.extend(self.parent_type._filter_fields(filter_function))
fields.extend(filter(filter_function, self.fields))
return fields | Utility to iterate through all fields (super types first) of a type.
        :param filter_function: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted. | Below is the the instruction that describes the task:
### Input:
Utility to iterate through all fields (super types first) of a type.
        :param filter_function: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted.
### Response:
def _filter_fields(self, filter_function):
"""
Utility to iterate through all fields (super types first) of a type.
        :param filter_function: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted.
"""
fields = []
if self.parent_type:
fields.extend(self.parent_type._filter_fields(filter_function))
fields.extend(filter(filter_function, self.fields))
return fields |
def _get_sorted_section(self, nts_section):
"""Sort GO IDs in each section, if requested by user."""
#pylint: disable=unnecessary-lambda
if self.section_sortby is True:
return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt))
if self.section_sortby is False or self.section_sortby is None:
return nts_section
# print('SORT GO IDS IN A SECTION')
return sorted(nts_section, key=lambda nt: self.section_sortby(nt)) | Sort GO IDs in each section, if requested by user. | Below is the the instruction that describes the task:
### Input:
Sort GO IDs in each section, if requested by user.
### Response:
def _get_sorted_section(self, nts_section):
"""Sort GO IDs in each section, if requested by user."""
#pylint: disable=unnecessary-lambda
if self.section_sortby is True:
return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt))
if self.section_sortby is False or self.section_sortby is None:
return nts_section
# print('SORT GO IDS IN A SECTION')
return sorted(nts_section, key=lambda nt: self.section_sortby(nt)) |
def load_collectors_from_paths(paths):
"""
Scan for collectors to load from path
"""
# Initialize return value
collectors = {}
if paths is None:
return
if isinstance(paths, basestring):
paths = paths.split(',')
paths = map(str.strip, paths)
load_include_path(paths)
for path in paths:
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return collectors
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
subcollectors = load_collectors_from_paths([fpath])
for key in subcollectors:
collectors[key] = subcollectors[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath) and
len(f) > 3 and
f[-3:] == '.py' and
f[0:4] != 'test' and
f[0] != '.'):
modname = f[:-3]
fp, pathname, description = imp.find_module(modname, [path])
try:
# Import the module
mod = imp.load_module(modname, fp, pathname, description)
except (KeyboardInterrupt, SystemExit) as err:
logger.error(
"System or keyboard interrupt "
"while loading module %s"
% modname)
if isinstance(err, SystemExit):
sys.exit(err.code)
raise KeyboardInterrupt
except Exception:
# Log error
logger.error("Failed to import module: %s. %s",
modname,
traceback.format_exc())
else:
for name, cls in get_collectors_from_module(mod):
collectors[name] = cls
finally:
if fp:
fp.close()
# Return Collector classes
return collectors | Scan for collectors to load from path | Below is the the instruction that describes the task:
### Input:
Scan for collectors to load from path
### Response:
def load_collectors_from_paths(paths):
"""
Scan for collectors to load from path
"""
# Initialize return value
collectors = {}
if paths is None:
return
if isinstance(paths, basestring):
paths = paths.split(',')
paths = map(str.strip, paths)
load_include_path(paths)
for path in paths:
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return collectors
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
subcollectors = load_collectors_from_paths([fpath])
for key in subcollectors:
collectors[key] = subcollectors[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath) and
len(f) > 3 and
f[-3:] == '.py' and
f[0:4] != 'test' and
f[0] != '.'):
modname = f[:-3]
fp, pathname, description = imp.find_module(modname, [path])
try:
# Import the module
mod = imp.load_module(modname, fp, pathname, description)
except (KeyboardInterrupt, SystemExit) as err:
logger.error(
"System or keyboard interrupt "
"while loading module %s"
% modname)
if isinstance(err, SystemExit):
sys.exit(err.code)
raise KeyboardInterrupt
except Exception:
# Log error
logger.error("Failed to import module: %s. %s",
modname,
traceback.format_exc())
else:
for name, cls in get_collectors_from_module(mod):
collectors[name] = cls
finally:
if fp:
fp.close()
# Return Collector classes
return collectors |
def parse_euro_date(self, date_string: str):
""" Parses dd/MM/yyyy dates """
self.date = datetime.strptime(date_string, "%d/%m/%Y")
return self.date | Parses dd/MM/yyyy dates | Below is the the instruction that describes the task:
### Input:
Parses dd/MM/yyyy dates
### Response:
def parse_euro_date(self, date_string: str):
""" Parses dd/MM/yyyy dates """
self.date = datetime.strptime(date_string, "%d/%m/%Y")
return self.date |
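
The parsing itself reduces to a single strptime call, shown here without the surrounding class; the date string is just an example.

from datetime import datetime

print(datetime.strptime("24/12/2019", "%d/%m/%Y"))   # -> 2019-12-24 00:00:00
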
def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute() | Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase | Below is the the instruction that describes the task:
### Input:
Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
### Response:
def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute() |
def TexSoup(tex_code):
r"""
At a high-level, parses provided Tex into a navigable, searchable structure.
This is accomplished in two steps:
1. Tex is parsed, cleaned, and packaged.
2. Structure fed to TexNodes for a searchable, coder-friendly interface.
:param Union[str,iterable] tex_code: the Tex source
:return: :class:`TexSoup.data.TexNode` object representing tex document
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{document}
...
... \section{Hello \textit{world}.}
...
... \subsection{Watermelon}
...
... (n.) A sacred fruit. Also known as:
...
... \begin{itemize}
... \item red lemon
... \item life
... \end{itemize}
...
... Here is the prevalence of each synonym.
...
... \begin{tabular}{c c}
... red lemon & uncommon \\ \n
... life & common
... \end{tabular}
...
... \end{document}
... ''')
>>> soup.section
\section{Hello \textit{world}.}
>>> soup.section.name
'section'
>>> soup.section.string
'Hello \\textit{world}.'
>>> soup.section.parent.name
'document'
>>> soup.tabular
\begin{tabular}{c c}
red lemon & uncommon \\ \n
life & common
\end{tabular}
>>> soup.tabular.args[0].value
'c c'
>>> soup.itemize
\begin{itemize}
\item red lemon
\item life
\end{itemize}
>>> soup.item
\item red lemon
...
>>> list(soup.find_all('item'))
[\item red lemon
, \item life
]
>>> soup = TexSoup(r'''\textbf{'Hello'}\textit{'Y'}O\textit{'U'}''')
>>> soup.textbf.delete()
>>> 'Hello' not in repr(soup)
True
>>> soup.textit.replace_with('S')
>>> soup.textit.replace_with('U', 'P')
>>> soup
SOUP
"""
parsed, src = read(tex_code)
return TexNode(parsed, src=src) | r"""
At a high-level, parses provided Tex into a navigable, searchable structure.
This is accomplished in two steps:
1. Tex is parsed, cleaned, and packaged.
2. Structure fed to TexNodes for a searchable, coder-friendly interface.
:param Union[str,iterable] tex_code: the Tex source
:return: :class:`TexSoup.data.TexNode` object representing tex document
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{document}
...
... \section{Hello \textit{world}.}
...
... \subsection{Watermelon}
...
... (n.) A sacred fruit. Also known as:
...
... \begin{itemize}
... \item red lemon
... \item life
... \end{itemize}
...
... Here is the prevalence of each synonym.
...
... \begin{tabular}{c c}
... red lemon & uncommon \\ \n
... life & common
... \end{tabular}
...
... \end{document}
... ''')
>>> soup.section
\section{Hello \textit{world}.}
>>> soup.section.name
'section'
>>> soup.section.string
'Hello \\textit{world}.'
>>> soup.section.parent.name
'document'
>>> soup.tabular
\begin{tabular}{c c}
red lemon & uncommon \\ \n
life & common
\end{tabular}
>>> soup.tabular.args[0].value
'c c'
>>> soup.itemize
\begin{itemize}
\item red lemon
\item life
\end{itemize}
>>> soup.item
\item red lemon
...
>>> list(soup.find_all('item'))
[\item red lemon
, \item life
]
>>> soup = TexSoup(r'''\textbf{'Hello'}\textit{'Y'}O\textit{'U'}''')
>>> soup.textbf.delete()
>>> 'Hello' not in repr(soup)
True
>>> soup.textit.replace_with('S')
>>> soup.textit.replace_with('U', 'P')
>>> soup
SOUP | Below is the the instruction that describes the task:
### Input:
r"""
At a high-level, parses provided Tex into a navigable, searchable structure.
This is accomplished in two steps:
1. Tex is parsed, cleaned, and packaged.
2. Structure fed to TexNodes for a searchable, coder-friendly interface.
:param Union[str,iterable] tex_code: the Tex source
:return: :class:`TexSoup.data.TexNode` object representing tex document
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{document}
...
... \section{Hello \textit{world}.}
...
... \subsection{Watermelon}
...
... (n.) A sacred fruit. Also known as:
...
... \begin{itemize}
... \item red lemon
... \item life
... \end{itemize}
...
... Here is the prevalence of each synonym.
...
... \begin{tabular}{c c}
... red lemon & uncommon \\ \n
... life & common
... \end{tabular}
...
... \end{document}
... ''')
>>> soup.section
\section{Hello \textit{world}.}
>>> soup.section.name
'section'
>>> soup.section.string
'Hello \\textit{world}.'
>>> soup.section.parent.name
'document'
>>> soup.tabular
\begin{tabular}{c c}
red lemon & uncommon \\ \n
life & common
\end{tabular}
>>> soup.tabular.args[0].value
'c c'
>>> soup.itemize
\begin{itemize}
\item red lemon
\item life
\end{itemize}
>>> soup.item
\item red lemon
...
>>> list(soup.find_all('item'))
[\item red lemon
, \item life
]
>>> soup = TexSoup(r'''\textbf{'Hello'}\textit{'Y'}O\textit{'U'}''')
>>> soup.textbf.delete()
>>> 'Hello' not in repr(soup)
True
>>> soup.textit.replace_with('S')
>>> soup.textit.replace_with('U', 'P')
>>> soup
SOUP
### Response:
def TexSoup(tex_code):
r"""
At a high-level, parses provided Tex into a navigable, searchable structure.
This is accomplished in two steps:
1. Tex is parsed, cleaned, and packaged.
2. Structure fed to TexNodes for a searchable, coder-friendly interface.
:param Union[str,iterable] tex_code: the Tex source
:return: :class:`TexSoup.data.TexNode` object representing tex document
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{document}
...
... \section{Hello \textit{world}.}
...
... \subsection{Watermelon}
...
... (n.) A sacred fruit. Also known as:
...
... \begin{itemize}
... \item red lemon
... \item life
... \end{itemize}
...
... Here is the prevalence of each synonym.
...
... \begin{tabular}{c c}
... red lemon & uncommon \\ \n
... life & common
... \end{tabular}
...
... \end{document}
... ''')
>>> soup.section
\section{Hello \textit{world}.}
>>> soup.section.name
'section'
>>> soup.section.string
'Hello \\textit{world}.'
>>> soup.section.parent.name
'document'
>>> soup.tabular
\begin{tabular}{c c}
red lemon & uncommon \\ \n
life & common
\end{tabular}
>>> soup.tabular.args[0].value
'c c'
>>> soup.itemize
\begin{itemize}
\item red lemon
\item life
\end{itemize}
>>> soup.item
\item red lemon
...
>>> list(soup.find_all('item'))
[\item red lemon
, \item life
]
>>> soup = TexSoup(r'''\textbf{'Hello'}\textit{'Y'}O\textit{'U'}''')
>>> soup.textbf.delete()
>>> 'Hello' not in repr(soup)
True
>>> soup.textit.replace_with('S')
>>> soup.textit.replace_with('U', 'P')
>>> soup
SOUP
"""
parsed, src = read(tex_code)
return TexNode(parsed, src=src) |
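A shorter usage sketch than the doctest above, assuming the TexSoup package is importable; it only exercises parsing and attribute-style lookup:
from TexSoup import TexSoup
soup = TexSoup(r"\section{Intro} Some text \textit{here}.")
print(soup.section)        # \section{Intro}
print(soup.textit.string)  # here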
def create_theta(self):
"""
Returns the set of inner angles (between 0 and pi)
reconstructed from point coordinates.
Also returns the corners corresponding to each entry of theta.
"""
import itertools
from pylocus.basics_angles import from_0_to_pi
theta = np.empty((self.M, ))
corners = np.empty((self.M, 3))
k = 0
indices = np.arange(self.N)
for triangle in itertools.combinations(indices, 3):
for counter, idx in enumerate(triangle):
corner = idx
other = np.delete(triangle, counter)
corners[k, :] = [corner, other[0], other[1]]
theta[k] = self.get_inner_angle(corner, other)
theta[k] = from_0_to_pi(theta[k])
if DEBUG:
print(self.abs_angles[corner, other[0]],
self.abs_angles[corner, other[1]])
print('theta', corners[k, :], theta[k])
k = k + 1
inner_angle_sum = theta[k - 1] + theta[k - 2] + theta[k - 3]
assert abs(inner_angle_sum - pi) < 1e-10, \
'inner angle sum: {} {} {}'.format(
triangle, inner_angle_sum, (theta[k - 1], theta[k - 2], theta[k - 3]))
self.theta = theta
self.corners = corners
return theta, corners | Returns the set of inner angles (between 0 and pi)
reconstructed from point coordinates.
Also returns the corners corresponding to each entry of theta. | Below is the the instruction that describes the task:
### Input:
Returns the set of inner angles (between 0 and pi)
reconstructed from point coordinates.
Also returns the corners corresponding to each entry of theta.
### Response:
def create_theta(self):
"""
Returns the set of inner angles (between 0 and pi)
reconstructed from point coordinates.
Also returns the corners corresponding to each entry of theta.
"""
import itertools
from pylocus.basics_angles import from_0_to_pi
theta = np.empty((self.M, ))
corners = np.empty((self.M, 3))
k = 0
indices = np.arange(self.N)
for triangle in itertools.combinations(indices, 3):
for counter, idx in enumerate(triangle):
corner = idx
other = np.delete(triangle, counter)
corners[k, :] = [corner, other[0], other[1]]
theta[k] = self.get_inner_angle(corner, other)
theta[k] = from_0_to_pi(theta[k])
if DEBUG:
print(self.abs_angles[corner, other[0]],
self.abs_angles[corner, other[1]])
print('theta', corners[k, :], theta[k])
k = k + 1
inner_angle_sum = theta[k - 1] + theta[k - 2] + theta[k - 3]
assert abs(inner_angle_sum - pi) < 1e-10, \
'inner angle sum: {} {} {}'.format(
triangle, inner_angle_sum, (theta[k - 1], theta[k - 2], theta[k - 3]))
self.theta = theta
self.corners = corners
return theta, corners |
def read_int(self, lpBaseAddress):
"""
Reads a signed integer from the memory of the process.
@see: L{peek_int}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@rtype: int
@return: Integer value read from the process memory.
@raise WindowsError: On error an exception is raised.
"""
return self.__read_c_type(lpBaseAddress, compat.b('@l'), ctypes.c_int) | Reads a signed integer from the memory of the process.
@see: L{peek_int}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@rtype: int
@return: Integer value read from the process memory.
@raise WindowsError: On error an exception is raised. | Below is the the instruction that describes the task:
### Input:
Reads a signed integer from the memory of the process.
@see: L{peek_int}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@rtype: int
@return: Integer value read from the process memory.
@raise WindowsError: On error an exception is raised.
### Response:
def read_int(self, lpBaseAddress):
"""
Reads a signed integer from the memory of the process.
@see: L{peek_int}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@rtype: int
@return: Integer value read from the process memory.
@raise WindowsError: On error an exception is raised.
"""
return self.__read_c_type(lpBaseAddress, compat.b('@l'), ctypes.c_int) |
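A hedged usage sketch; 'process' is assumed to be an already-attached debugger Process object exposing this method, and 'address' a readable address in the target (both are hypothetical names, not defined above):
value = process.read_int(address)
print("signed int at 0x%x = %d" % (address, value))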
def result(self):
"""
        The result of the job's execution. Accessing this property while the job is
        pending or running will raise #InvalidState. If an exception occurred during
        the job's execution, it will be raised.
# Raises
InvalidState: If the job is not in state #FINISHED.
Cancelled: If the job was cancelled.
        any: If an exception occurred during the job's execution.
"""
if self.__cancelled:
raise Job.Cancelled
elif self.__state in (Job.PENDING, Job.RUNNING):
raise Job.InvalidState('job is {0}'.format(self.__state))
elif self.__state == Job.ERROR:
reraise(*self.__exception)
elif self.__state == Job.SUCCESS:
return self.__result
else:
            raise RuntimeError('invalid job state {0!r}'.format(self.__state)) | The result of the job's execution. Accessing this property while the job is
pending or running will raise #InvalidState. If an exception occurred during
the job's execution, it will be raised.
# Raises
InvalidState: If the job is not in state #FINISHED.
Cancelled: If the job was cancelled.
any: If an exception occurred during the job's execution. | Below is the the instruction that describes the task:
### Input:
The result of the job's execution. Accessing this property while the job is
pending or running will raise #InvalidState. If an exception occurred during
the job's execution, it will be raised.
# Raises
InvalidState: If the job is not in state #FINISHED.
Cancelled: If the job was cancelled.
any: If an exception occurred during the job's execution.
### Response:
def result(self):
"""
        The result of the job's execution. Accessing this property while the job is
        pending or running will raise #InvalidState. If an exception occurred during
        the job's execution, it will be raised.
# Raises
InvalidState: If the job is not in state #FINISHED.
Cancelled: If the job was cancelled.
        any: If an exception occurred during the job's execution.
"""
if self.__cancelled:
raise Job.Cancelled
elif self.__state in (Job.PENDING, Job.RUNNING):
raise Job.InvalidState('job is {0}'.format(self.__state))
elif self.__state == Job.ERROR:
reraise(*self.__exception)
elif self.__state == Job.SUCCESS:
return self.__result
else:
raise RuntimeError('invalid job state {0!r}'.format(self.__state)) |
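A minimal consumer sketch, assuming 'job' is an instance from the same framework that has already run; the exception classes are the ones referenced above:
try:
    print("result:", job.result)
except Job.Cancelled:
    print("job was cancelled")
except Job.InvalidState as exc:
    print("job has not finished yet:", exc)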
def verify(expr, params=None):
"""
Determine if expression can be successfully translated to execute on
MapD
"""
try:
compile(expr, params=params)
return True
except com.TranslationError:
return False | Determine if expression can be successfully translated to execute on
MapD | Below is the the instruction that describes the task:
### Input:
Determine if expression can be successfully translated to execute on
MapD
### Response:
def verify(expr, params=None):
"""
Determine if expression can be successfully translated to execute on
MapD
"""
try:
compile(expr, params=params)
return True
except com.TranslationError:
return False |
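A hedged usage sketch, assuming 'table' is an ibis table expression bound to the MapD backend; the aggregation shown is only an illustration:
expr = table.group_by("city").aggregate(n=table.count())
if verify(expr):
    print("expression can be compiled for MapD")
else:
    print("expression uses operations the backend cannot translate")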
def can(obj):
"""Prepare an object for pickling."""
import_needed = False
for cls, canner in iteritems(can_map):
if isinstance(cls, string_types):
import_needed = True
break
elif istype(obj, cls):
return canner(obj)
if import_needed:
# perform can_map imports, then try again
# this will usually only happen once
_import_mapping(can_map, _original_can_map)
return can(obj)
return obj | Prepare an object for pickling. | Below is the the instruction that describes the task:
### Input:
Prepare an object for pickling.
### Response:
def can(obj):
"""Prepare an object for pickling."""
import_needed = False
for cls, canner in iteritems(can_map):
if isinstance(cls, string_types):
import_needed = True
break
elif istype(obj, cls):
return canner(obj)
if import_needed:
# perform can_map imports, then try again
# this will usually only happen once
_import_mapping(can_map, _original_can_map)
return can(obj)
return obj |
def calc_information_ratio(returns, benchmark_returns):
"""
Calculates the `Information ratio <https://www.investopedia.com/terms/i/informationratio.asp>`_ (or `from Wikipedia <http://en.wikipedia.org/wiki/Information_ratio>`_).
"""
diff_rets = returns - benchmark_returns
diff_std = np.std(diff_rets, ddof=1)
if np.isnan(diff_std) or diff_std == 0:
return 0.0
return np.divide(diff_rets.mean(), diff_std) | Calculates the `Information ratio <https://www.investopedia.com/terms/i/informationratio.asp>`_ (or `from Wikipedia <http://en.wikipedia.org/wiki/Information_ratio>`_). | Below is the the instruction that describes the task:
### Input:
Calculates the `Information ratio <https://www.investopedia.com/terms/i/informationratio.asp>`_ (or `from Wikipedia <http://en.wikipedia.org/wiki/Information_ratio>`_).
### Response:
def calc_information_ratio(returns, benchmark_returns):
"""
Calculates the `Information ratio <https://www.investopedia.com/terms/i/informationratio.asp>`_ (or `from Wikipedia <http://en.wikipedia.org/wiki/Information_ratio>`_).
"""
diff_rets = returns - benchmark_returns
diff_std = np.std(diff_rets, ddof=1)
if np.isnan(diff_std) or diff_std == 0:
return 0.0
return np.divide(diff_rets.mean(), diff_std) |
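A small numeric sketch with made-up daily returns, assuming numpy is imported as np (as the implementation already requires):
import numpy as np
returns = np.array([0.010, 0.020, -0.005, 0.007])
benchmark = np.array([0.008, 0.015, -0.002, 0.005])
print(calc_information_ratio(returns, benchmark))  # mean excess return divided by its std (ddof=1)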
def client_start(request, socket, context):
"""
Adds the client triple to CLIENTS.
"""
CLIENTS[socket.session.session_id] = (request, socket, context) | Adds the client triple to CLIENTS. | Below is the the instruction that describes the task:
### Input:
Adds the client triple to CLIENTS.
### Response:
def client_start(request, socket, context):
"""
Adds the client triple to CLIENTS.
"""
CLIENTS[socket.session.session_id] = (request, socket, context) |
def check_output(self, cmd, timeout=None, keep_rc=False, env=None):
""" Subclasses can override to provide special
environment setup, command prefixes, etc.
"""
return subproc.call(cmd, timeout=timeout or self.timeout,
keep_rc=keep_rc, env=env) | Subclasses can override to provide special
environment setup, command prefixes, etc. | Below is the the instruction that describes the task:
### Input:
Subclasses can override to provide special
environment setup, command prefixes, etc.
### Response:
def check_output(self, cmd, timeout=None, keep_rc=False, env=None):
""" Subclasses can override to provide special
environment setup, command prefixes, etc.
"""
return subproc.call(cmd, timeout=timeout or self.timeout,
keep_rc=keep_rc, env=env) |
def serialize_footnote(ctx, document, el, root):
"Serializes footnotes."
footnote_num = el.rid
if el.rid not in ctx.footnote_list:
ctx.footnote_id += 1
ctx.footnote_list[el.rid] = ctx.footnote_id
footnote_num = ctx.footnote_list[el.rid]
note = etree.SubElement(root, 'sup')
link = etree.SubElement(note, 'a')
link.set('href', '#')
link.text = u'{}'.format(footnote_num)
fire_hooks(ctx, document, el, note, ctx.get_hook('footnote'))
return root | Serializes footnotes. | Below is the the instruction that describes the task:
### Input:
Serializes footnotes.
### Response:
def serialize_footnote(ctx, document, el, root):
"Serializes footnotes."
footnote_num = el.rid
if el.rid not in ctx.footnote_list:
ctx.footnote_id += 1
ctx.footnote_list[el.rid] = ctx.footnote_id
footnote_num = ctx.footnote_list[el.rid]
note = etree.SubElement(root, 'sup')
link = etree.SubElement(note, 'a')
link.set('href', '#')
link.text = u'{}'.format(footnote_num)
fire_hooks(ctx, document, el, note, ctx.get_hook('footnote'))
return root |
def analyze_number(var, err=''):
""" Analyse number for type and split from unit
1px -> (q, 'px')
args:
var (str): number string
kwargs:
err (str): Error message
raises:
SyntaxError
returns:
tuple
"""
n, u = split_unit(var)
if not isinstance(var, string_types):
return (var, u)
if is_color(var):
return (var, 'color')
if is_int(n):
n = int(n)
elif is_float(n):
n = float(n)
else:
raise SyntaxError('%s ´%s´' % (err, var))
return (n, u) | Analyse number for type and split from unit
1px -> (q, 'px')
args:
var (str): number string
kwargs:
err (str): Error message
raises:
SyntaxError
returns:
tuple | Below is the the instruction that describes the task:
### Input:
Analyse number for type and split from unit
1px -> (q, 'px')
args:
var (str): number string
kwargs:
err (str): Error message
raises:
SyntaxError
returns:
tuple
### Response:
def analyze_number(var, err=''):
""" Analyse number for type and split from unit
1px -> (q, 'px')
args:
var (str): number string
kwargs:
err (str): Error message
raises:
SyntaxError
returns:
tuple
"""
n, u = split_unit(var)
if not isinstance(var, string_types):
return (var, u)
if is_color(var):
return (var, 'color')
if is_int(n):
n = int(n)
elif is_float(n):
n = float(n)
else:
raise SyntaxError('%s ´%s´' % (err, var))
return (n, u) |
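Assuming the helpers it calls (split_unit, is_color, is_int, is_float) behave as their names suggest, typical results look like this:
print(analyze_number('1px'))    # expected: (1, 'px')
print(analyze_number('0.5em'))  # expected: (0.5, 'em')
# analyze_number('foo', err='bad value') would raise SyntaxError with the supplied message prefix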
def slim_optimize(self, error_value=float('nan'), message=None):
"""Optimize model without creating a solution object.
Creating a full solution object implies fetching shadow prices and
flux values for all reactions and metabolites from the solver
object. This necessarily takes some time and in cases where only one
or two values are of interest, it is recommended to instead use this
function which does not create a solution object returning only the
value of the objective. Note however that the `optimize()` function
uses efficient means to fetch values so if you need fluxes/shadow
prices for more than say 4 reactions/metabolites, then the total
speed increase of `slim_optimize` versus `optimize` is expected to
be small or even negative depending on how you fetch the values
after optimization.
Parameters
----------
error_value : float, None
The value to return if optimization failed due to e.g.
infeasibility. If None, raise `OptimizationError` if the
optimization fails.
message : string
Error message to use if the model optimization did not succeed.
Returns
-------
float
The objective value.
"""
self.solver.optimize()
if self.solver.status == optlang.interface.OPTIMAL:
return self.solver.objective.value
elif error_value is not None:
return error_value
else:
assert_optimal(self, message) | Optimize model without creating a solution object.
Creating a full solution object implies fetching shadow prices and
flux values for all reactions and metabolites from the solver
object. This necessarily takes some time and in cases where only one
or two values are of interest, it is recommended to instead use this
function which does not create a solution object returning only the
value of the objective. Note however that the `optimize()` function
uses efficient means to fetch values so if you need fluxes/shadow
prices for more than say 4 reactions/metabolites, then the total
speed increase of `slim_optimize` versus `optimize` is expected to
be small or even negative depending on how you fetch the values
after optimization.
Parameters
----------
error_value : float, None
The value to return if optimization failed due to e.g.
infeasibility. If None, raise `OptimizationError` if the
optimization fails.
message : string
Error message to use if the model optimization did not succeed.
Returns
-------
float
The objective value. | Below is the the instruction that describes the task:
### Input:
Optimize model without creating a solution object.
Creating a full solution object implies fetching shadow prices and
flux values for all reactions and metabolites from the solver
object. This necessarily takes some time and in cases where only one
or two values are of interest, it is recommended to instead use this
function which does not create a solution object returning only the
value of the objective. Note however that the `optimize()` function
uses efficient means to fetch values so if you need fluxes/shadow
prices for more than say 4 reactions/metabolites, then the total
speed increase of `slim_optimize` versus `optimize` is expected to
be small or even negative depending on how you fetch the values
after optimization.
Parameters
----------
error_value : float, None
The value to return if optimization failed due to e.g.
infeasibility. If None, raise `OptimizationError` if the
optimization fails.
message : string
Error message to use if the model optimization did not succeed.
Returns
-------
float
The objective value.
### Response:
def slim_optimize(self, error_value=float('nan'), message=None):
"""Optimize model without creating a solution object.
Creating a full solution object implies fetching shadow prices and
flux values for all reactions and metabolites from the solver
object. This necessarily takes some time and in cases where only one
or two values are of interest, it is recommended to instead use this
function which does not create a solution object returning only the
value of the objective. Note however that the `optimize()` function
uses efficient means to fetch values so if you need fluxes/shadow
prices for more than say 4 reactions/metabolites, then the total
speed increase of `slim_optimize` versus `optimize` is expected to
be small or even negative depending on how you fetch the values
after optimization.
Parameters
----------
error_value : float, None
The value to return if optimization failed due to e.g.
infeasibility. If None, raise `OptimizationError` if the
optimization fails.
message : string
Error message to use if the model optimization did not succeed.
Returns
-------
float
The objective value.
"""
self.solver.optimize()
if self.solver.status == optlang.interface.OPTIMAL:
return self.solver.objective.value
elif error_value is not None:
return error_value
else:
assert_optimal(self, message) |
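A hedged usage sketch, assuming 'model' is an already-built cobra Model; only the objective value is needed, so no Solution object is created:
growth = model.slim_optimize()  # returns float('nan') if optimization fails
print(growth)
# model.slim_optimize(error_value=None) would instead raise an optimization error on failure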
def invisible_canvas():
"""
Context manager yielding a temporary canvas drawn in batch mode, invisible
to the user. Original state is restored on exit.
Example use; obtain X axis object without interfering with anything::
with invisible_canvas() as c:
efficiency.Draw()
g = efficiency.GetPaintedGraph()
return g.GetXaxis()
"""
with preserve_current_canvas():
with preserve_batch_state():
ROOT.gROOT.SetBatch()
c = ROOT.TCanvas()
try:
c.cd()
yield c
finally:
c.Close()
c.IsA().Destructor(c) | Context manager yielding a temporary canvas drawn in batch mode, invisible
to the user. Original state is restored on exit.
Example use; obtain X axis object without interfering with anything::
with invisible_canvas() as c:
efficiency.Draw()
g = efficiency.GetPaintedGraph()
return g.GetXaxis() | Below is the the instruction that describes the task:
### Input:
Context manager yielding a temporary canvas drawn in batch mode, invisible
to the user. Original state is restored on exit.
Example use; obtain X axis object without interfering with anything::
with invisible_canvas() as c:
efficiency.Draw()
g = efficiency.GetPaintedGraph()
return g.GetXaxis()
### Response:
def invisible_canvas():
"""
Context manager yielding a temporary canvas drawn in batch mode, invisible
to the user. Original state is restored on exit.
Example use; obtain X axis object without interfering with anything::
with invisible_canvas() as c:
efficiency.Draw()
g = efficiency.GetPaintedGraph()
return g.GetXaxis()
"""
with preserve_current_canvas():
with preserve_batch_state():
ROOT.gROOT.SetBatch()
c = ROOT.TCanvas()
try:
c.cd()
yield c
finally:
c.Close()
c.IsA().Destructor(c) |
def is_port_default(self):
'''Return whether the URL is using the default port.'''
if self.scheme in RELATIVE_SCHEME_DEFAULT_PORTS:
return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port | Return whether the URL is using the default port. | Below is the the instruction that describes the task:
### Input:
Return whether the URL is using the default port.
### Response:
def is_port_default(self):
'''Return whether the URL is using the default port.'''
if self.scheme in RELATIVE_SCHEME_DEFAULT_PORTS:
return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port |
def bohachevsky1(theta):
"""One of the Bohachevsky functions"""
x, y = theta
obj = x ** 2 + 2 * y ** 2 - 0.3 * np.cos(3 * np.pi * x) - 0.4 * np.cos(4 * np.pi * y) + 0.7
grad = np.array([
2 * x + 0.3 * np.sin(3 * np.pi * x) * 3 * np.pi,
4 * y + 0.4 * np.sin(4 * np.pi * y) * 4 * np.pi,
])
return obj, grad | One of the Bohachevsky functions | Below is the the instruction that describes the task:
### Input:
One of the Bohachevsky functions
### Response:
def bohachevsky1(theta):
"""One of the Bohachevsky functions"""
x, y = theta
obj = x ** 2 + 2 * y ** 2 - 0.3 * np.cos(3 * np.pi * x) - 0.4 * np.cos(4 * np.pi * y) + 0.7
grad = np.array([
2 * x + 0.3 * np.sin(3 * np.pi * x) * 3 * np.pi,
4 * y + 0.4 * np.sin(4 * np.pi * y) * 4 * np.pi,
])
return obj, grad |
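A quick check of the implementation above: the global minimum sits at the origin with value 0, and the gradient vanishes there:
import numpy as np
obj, grad = bohachevsky1(np.array([0.0, 0.0]))
print(obj)   # ~0.0 (up to floating-point rounding)
print(grad)  # [0. 0.]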
def remove_none_value(data):
"""remove item from dict if value is None.
return new dict.
"""
return dict((k, v) for k, v in data.items() if v is not None) | remove item from dict if value is None.
return new dict. | Below is the the instruction that describes the task:
### Input:
remove item from dict if value is None.
return new dict.
### Response:
def remove_none_value(data):
"""remove item from dict if value is None.
return new dict.
"""
return dict((k, v) for k, v in data.items() if v is not None) |
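A short usage example of the helper above:
data = {'a': 1, 'b': None, 'c': 'x'}
print(remove_none_value(data))  # {'a': 1, 'c': 'x'}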
def _query(action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None):
'''
Make a web call to GoGrid
.. versionadded:: 2015.8.0
'''
vm_ = get_configured_provider()
apikey = config.get_cloud_config_value(
'apikey', vm_, __opts__, search_global=False
)
sharedsecret = config.get_cloud_config_value(
'sharedsecret', vm_, __opts__, search_global=False
)
path = 'https://api.gogrid.com/api/'
if action:
path += action
if command:
path += '/{0}'.format(command)
log.debug('GoGrid URL: %s', path)
if not isinstance(args, dict):
args = {}
epoch = six.text_type(int(time.time()))
hashtext = ''.join((apikey, sharedsecret, epoch))
args['sig'] = salt.utils.hashutils.md5_digest(hashtext)
args['format'] = 'json'
args['v'] = '1.0'
args['api_key'] = apikey
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
return_content = None
result = salt.utils.http.query(
path,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
text=True,
status=True,
opts=__opts__,
)
log.debug('GoGrid Response Status Code: %s', result['status'])
return result['dict'] | Make a web call to GoGrid
.. versionadded:: 2015.8.0 | Below is the the instruction that describes the task:
### Input:
Make a web call to GoGrid
.. versionadded:: 2015.8.0
### Response:
def _query(action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None):
'''
Make a web call to GoGrid
.. versionadded:: 2015.8.0
'''
vm_ = get_configured_provider()
apikey = config.get_cloud_config_value(
'apikey', vm_, __opts__, search_global=False
)
sharedsecret = config.get_cloud_config_value(
'sharedsecret', vm_, __opts__, search_global=False
)
path = 'https://api.gogrid.com/api/'
if action:
path += action
if command:
path += '/{0}'.format(command)
log.debug('GoGrid URL: %s', path)
if not isinstance(args, dict):
args = {}
epoch = six.text_type(int(time.time()))
hashtext = ''.join((apikey, sharedsecret, epoch))
args['sig'] = salt.utils.hashutils.md5_digest(hashtext)
args['format'] = 'json'
args['v'] = '1.0'
args['api_key'] = apikey
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
return_content = None
result = salt.utils.http.query(
path,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
text=True,
status=True,
opts=__opts__,
)
log.debug('GoGrid Response Status Code: %s', result['status'])
return result['dict'] |
def export_launch_vm(self, description, progress, virtual_box):
"""Exports and optionally launch a VM described in description parameter
in description of type :class:`IVirtualSystemDescription`
VirtualSystemDescription object which is describing a machine and all required parameters.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in virtual_box of type :class:`IVirtualBox`
Reference to the server-side API root object.
"""
if not isinstance(description, IVirtualSystemDescription):
raise TypeError("description can only be an instance of type IVirtualSystemDescription")
if not isinstance(progress, IProgress):
raise TypeError("progress can only be an instance of type IProgress")
if not isinstance(virtual_box, IVirtualBox):
raise TypeError("virtual_box can only be an instance of type IVirtualBox")
self._call("exportLaunchVM",
in_p=[description, progress, virtual_box]) | Exports and optionally launch a VM described in description parameter
in description of type :class:`IVirtualSystemDescription`
VirtualSystemDescription object which is describing a machine and all required parameters.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in virtual_box of type :class:`IVirtualBox`
Reference to the server-side API root object. | Below is the the instruction that describes the task:
### Input:
Exports and optionally launch a VM described in description parameter
in description of type :class:`IVirtualSystemDescription`
VirtualSystemDescription object which is describing a machine and all required parameters.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in virtual_box of type :class:`IVirtualBox`
Reference to the server-side API root object.
### Response:
def export_launch_vm(self, description, progress, virtual_box):
"""Exports and optionally launch a VM described in description parameter
in description of type :class:`IVirtualSystemDescription`
VirtualSystemDescription object which is describing a machine and all required parameters.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in virtual_box of type :class:`IVirtualBox`
Reference to the server-side API root object.
"""
if not isinstance(description, IVirtualSystemDescription):
raise TypeError("description can only be an instance of type IVirtualSystemDescription")
if not isinstance(progress, IProgress):
raise TypeError("progress can only be an instance of type IProgress")
if not isinstance(virtual_box, IVirtualBox):
raise TypeError("virtual_box can only be an instance of type IVirtualBox")
self._call("exportLaunchVM",
in_p=[description, progress, virtual_box]) |
def _get_char(self, win, char):
def get_check_next_byte():
char = win.getch()
if 128 <= char <= 191:
return char
else:
raise UnicodeError
bytes = []
if char <= 127:
# 1 bytes
bytes.append(char)
#elif 194 <= char <= 223:
elif 192 <= char <= 223:
# 2 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
elif 224 <= char <= 239:
# 3 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
elif 240 <= char <= 244:
# 4 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
#print('bytes = {}'.format(bytes))
""" no zero byte allowed """
while 0 in bytes:
bytes.remove(0)
if version_info < (3, 0):
out = ''.join([chr(b) for b in bytes])
else:
buf = bytearray(bytes)
out = self._decode_string(buf)
#out = buf.decode('utf-8')
return out | no zero byte allowed | Below is the the instruction that describes the task:
### Input:
no zero byte allowed
### Response:
def _get_char(self, win, char):
def get_check_next_byte():
char = win.getch()
if 128 <= char <= 191:
return char
else:
raise UnicodeError
bytes = []
if char <= 127:
# 1 bytes
bytes.append(char)
#elif 194 <= char <= 223:
elif 192 <= char <= 223:
# 2 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
elif 224 <= char <= 239:
# 3 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
elif 240 <= char <= 244:
# 4 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
#print('bytes = {}'.format(bytes))
""" no zero byte allowed """
while 0 in bytes:
bytes.remove(0)
if version_info < (3, 0):
out = ''.join([chr(b) for b in bytes])
else:
buf = bytearray(bytes)
out = self._decode_string(buf)
#out = buf.decode('utf-8')
return out |
def add_tooltip_to_highlighted_item(self, index):
"""
Add a tooltip showing the full path of the currently highlighted item
of the PathComboBox.
"""
self.setItemData(index, self.itemText(index), Qt.ToolTipRole) | Add a tooltip showing the full path of the currently highlighted item
of the PathComboBox. | Below is the the instruction that describes the task:
### Input:
Add a tooltip showing the full path of the currently highlighted item
of the PathComboBox.
### Response:
def add_tooltip_to_highlighted_item(self, index):
"""
Add a tooltip showing the full path of the currently highlighted item
of the PathComboBox.
"""
self.setItemData(index, self.itemText(index), Qt.ToolTipRole) |
def delete(args):
"""
Delete a river by name
"""
m = RiverManager(args.hosts)
m.delete(args.name) | Delete a river by name | Below is the the instruction that describes the task:
### Input:
Delete a river by name
### Response:
def delete(args):
"""
Delete a river by name
"""
m = RiverManager(args.hosts)
m.delete(args.name) |
def as_dict(df, ix=':'):
""" converts df to dict and adds a datetime field if df is datetime """
if isinstance(df.index, pd.DatetimeIndex):
df['datetime'] = df.index
return df.to_dict(orient='records')[ix] | converts df to dict and adds a datetime field if df is datetime | Below is the the instruction that describes the task:
### Input:
converts df to dict and adds a datetime field if df is datetime
### Response:
def as_dict(df, ix=':'):
""" converts df to dict and adds a datetime field if df is datetime """
if isinstance(df.index, pd.DatetimeIndex):
df['datetime'] = df.index
return df.to_dict(orient='records')[ix] |
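A usage sketch assuming pandas is available; note that the default ix is the literal string ':', which cannot index the records list, so an integer position is passed explicitly here:
import pandas as pd
df = pd.DataFrame({'price': [10.0, 10.5]},
                  index=pd.date_range('2024-01-01', periods=2))
print(as_dict(df, ix=0))  # first record, with a 'datetime' field added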
def decode_cmd_out(self, completed_cmd):
"""
return a standard message
"""
try:
stdout = completed_cmd.stdout.encode('utf-8').decode()
except AttributeError:
try:
stdout = str(bytes(completed_cmd.stdout), 'big5').strip()
except AttributeError:
stdout = str(bytes(completed_cmd.stdout).decode('utf-8')).strip()
try:
stderr = completed_cmd.stderr.encode('utf-8').decode()
except AttributeError:
try:
stderr = str(bytes(completed_cmd.stderr), 'big5').strip()
except AttributeError:
stderr = str(bytes(completed_cmd.stderr).decode('utf-8')).strip()
return ParsedCompletedCommand(
completed_cmd.returncode,
completed_cmd.args,
stdout,
stderr
) | return a standard message | Below is the the instruction that describes the task:
### Input:
return a standard message
### Response:
def decode_cmd_out(self, completed_cmd):
"""
return a standard message
"""
try:
stdout = completed_cmd.stdout.encode('utf-8').decode()
except AttributeError:
try:
stdout = str(bytes(completed_cmd.stdout), 'big5').strip()
except AttributeError:
stdout = str(bytes(completed_cmd.stdout).decode('utf-8')).strip()
try:
stderr = completed_cmd.stderr.encode('utf-8').decode()
except AttributeError:
try:
stderr = str(bytes(completed_cmd.stderr), 'big5').strip()
except AttributeError:
stderr = str(bytes(completed_cmd.stderr).decode('utf-8')).strip()
return ParsedCompletedCommand(
completed_cmd.returncode,
completed_cmd.args,
stdout,
stderr
) |
def resolve_field_instance(cls_or_instance):
"""Return a Schema instance from a Schema class or instance.
:param type|Schema cls_or_instance: Marshmallow Schema class or instance.
"""
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, FieldABC):
raise FieldInstanceResolutionError
return cls_or_instance()
else:
if not isinstance(cls_or_instance, FieldABC):
raise FieldInstanceResolutionError
return cls_or_instance | Return a Schema instance from a Schema class or instance.
:param type|Schema cls_or_instance: Marshmallow Schema class or instance. | Below is the the instruction that describes the task:
### Input:
Return a Schema instance from a Schema class or instance.
:param type|Schema cls_or_instance: Marshmallow Schema class or instance.
### Response:
def resolve_field_instance(cls_or_instance):
"""Return a Schema instance from a Schema class or instance.
:param type|Schema cls_or_instance: Marshmallow Schema class or instance.
"""
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, FieldABC):
raise FieldInstanceResolutionError
return cls_or_instance()
else:
if not isinstance(cls_or_instance, FieldABC):
raise FieldInstanceResolutionError
return cls_or_instance |
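A hedged illustration using marshmallow's fields (any FieldABC-compatible implementation would do); both a class and an instance are accepted:
from marshmallow import fields  # assumed to provide the FieldABC referenced above
print(resolve_field_instance(fields.Str))    # a new fields.Str() instance
print(resolve_field_instance(fields.Int()))  # the given instance, returned unchanged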
def _rotate_point(point, angle, ishape, rshape, reverse=False):
"""Transform a point from original image coordinates to rotated image
coordinates and back. It assumes the rotation point is the center of an
image.
This works on a simple rotation transformation::
newx = (startx) * np.cos(angle) - (starty) * np.sin(angle)
newy = (startx) * np.sin(angle) + (starty) * np.cos(angle)
It takes into account the differences in image size.
Parameters
----------
point : tuple
Point to be rotated, in the format of ``(x, y)`` measured from
origin.
angle : float
The angle in degrees to rotate the point by as measured
counter-clockwise from the X axis.
ishape : tuple
The shape of the original image, taken from ``image.shape``.
rshape : tuple
The shape of the rotated image, in the form of ``rotate.shape``.
reverse : bool, optional
Transform from rotated coordinates back to non-rotated image.
Returns
-------
rotated_point : tuple
Rotated point in the format of ``(x, y)`` as measured from origin.
"""
# unpack the image and rotated images shapes
if reverse:
angle = (angle * -1)
temp = ishape
ishape = rshape
rshape = temp
# transform into center of image coordinates
yhalf, xhalf = ishape
yrhalf, xrhalf = rshape
yhalf = yhalf / 2
xhalf = xhalf / 2
yrhalf = yrhalf / 2
xrhalf = xrhalf / 2
startx = point[0] - xhalf
starty = point[1] - yhalf
# do the rotation
newx = startx * np.cos(angle) - starty * np.sin(angle)
newy = startx * np.sin(angle) + starty * np.cos(angle)
# add back the padding from changing the size of the image
newx = newx + xrhalf
newy = newy + yrhalf
return (newx, newy) | Transform a point from original image coordinates to rotated image
coordinates and back. It assumes the rotation point is the center of an
image.
This works on a simple rotation transformation::
newx = (startx) * np.cos(angle) - (starty) * np.sin(angle)
newy = (startx) * np.sin(angle) + (starty) * np.cos(angle)
It takes into account the differences in image size.
Parameters
----------
point : tuple
Point to be rotated, in the format of ``(x, y)`` measured from
origin.
angle : float
The angle in degrees to rotate the point by as measured
counter-clockwise from the X axis.
ishape : tuple
The shape of the original image, taken from ``image.shape``.
rshape : tuple
The shape of the rotated image, in the form of ``rotate.shape``.
reverse : bool, optional
Transform from rotated coordinates back to non-rotated image.
Returns
-------
rotated_point : tuple
Rotated point in the format of ``(x, y)`` as measured from origin. | Below is the the instruction that describes the task:
### Input:
Transform a point from original image coordinates to rotated image
coordinates and back. It assumes the rotation point is the center of an
image.
This works on a simple rotation transformation::
newx = (startx) * np.cos(angle) - (starty) * np.sin(angle)
newy = (startx) * np.sin(angle) + (starty) * np.cos(angle)
It takes into account the differences in image size.
Parameters
----------
point : tuple
Point to be rotated, in the format of ``(x, y)`` measured from
origin.
angle : float
The angle in degrees to rotate the point by as measured
counter-clockwise from the X axis.
ishape : tuple
The shape of the original image, taken from ``image.shape``.
rshape : tuple
The shape of the rotated image, in the form of ``rotate.shape``.
reverse : bool, optional
Transform from rotated coordinates back to non-rotated image.
Returns
-------
rotated_point : tuple
Rotated point in the format of ``(x, y)`` as measured from origin.
### Response:
def _rotate_point(point, angle, ishape, rshape, reverse=False):
"""Transform a point from original image coordinates to rotated image
coordinates and back. It assumes the rotation point is the center of an
image.
This works on a simple rotation transformation::
newx = (startx) * np.cos(angle) - (starty) * np.sin(angle)
newy = (startx) * np.sin(angle) + (starty) * np.cos(angle)
It takes into account the differences in image size.
Parameters
----------
point : tuple
Point to be rotated, in the format of ``(x, y)`` measured from
origin.
angle : float
The angle in degrees to rotate the point by as measured
counter-clockwise from the X axis.
ishape : tuple
The shape of the original image, taken from ``image.shape``.
rshape : tuple
The shape of the rotated image, in the form of ``rotate.shape``.
reverse : bool, optional
Transform from rotated coordinates back to non-rotated image.
Returns
-------
rotated_point : tuple
Rotated point in the format of ``(x, y)`` as measured from origin.
"""
# unpack the image and rotated images shapes
if reverse:
angle = (angle * -1)
temp = ishape
ishape = rshape
rshape = temp
# transform into center of image coordinates
yhalf, xhalf = ishape
yrhalf, xrhalf = rshape
yhalf = yhalf / 2
xhalf = xhalf / 2
yrhalf = yrhalf / 2
xrhalf = xrhalf / 2
startx = point[0] - xhalf
starty = point[1] - yhalf
# do the rotation
newx = startx * np.cos(angle) - starty * np.sin(angle)
newy = startx * np.sin(angle) + starty * np.cos(angle)
# add back the padding from changing the size of the image
newx = newx + xrhalf
newy = newy + yrhalf
return (newx, newy) |
def trackjobs(func, results, spacer):
"""
Blocks and prints progress for just the func being requested from a list
of submitted engine jobs. Returns whether any of the jobs failed.
func = str
results = dict of asyncs
"""
## TODO: try to insert a better way to break on KBD here.
LOGGER.info("inside trackjobs of %s", func)
## get just the jobs from results that are relevant to this func
asyncs = [(i, results[i]) for i in results if i.split("-", 2)[0] == func]
## progress bar
start = time.time()
while 1:
## how many of this func have finished so far
ready = [i[1].ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
printstr = " {} | {} | s3 |".format(PRINTSTR[func], elapsed)
progressbar(len(ready), sum(ready), printstr, spacer=spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
sfails = []
errmsgs = []
for job in asyncs:
if not job[1].successful():
sfails.append(job[0])
errmsgs.append(job[1].result())
return func, sfails, errmsgs | Blocks and prints progress for just the func being requested from a list
of submitted engine jobs. Returns whether any of the jobs failed.
func = str
results = dict of asyncs | Below is the the instruction that describes the task:
### Input:
Blocks and prints progress for just the func being requested from a list
of submitted engine jobs. Returns whether any of the jobs failed.
func = str
results = dict of asyncs
### Response:
def trackjobs(func, results, spacer):
"""
Blocks and prints progress for just the func being requested from a list
of submitted engine jobs. Returns whether any of the jobs failed.
func = str
results = dict of asyncs
"""
## TODO: try to insert a better way to break on KBD here.
LOGGER.info("inside trackjobs of %s", func)
## get just the jobs from results that are relevant to this func
asyncs = [(i, results[i]) for i in results if i.split("-", 2)[0] == func]
## progress bar
start = time.time()
while 1:
## how many of this func have finished so far
ready = [i[1].ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
printstr = " {} | {} | s3 |".format(PRINTSTR[func], elapsed)
progressbar(len(ready), sum(ready), printstr, spacer=spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
sfails = []
errmsgs = []
for job in asyncs:
if not job[1].successful():
sfails.append(job[0])
errmsgs.append(job[1].result())
return func, sfails, errmsgs |
def terminate(self):
'''Stop the server process and change our state to TERMINATING. Only valid if state=READY.'''
logger.debug('client.terminate() called (state=%s)', self.strstate)
if self.state == ClientState.WAITING_FOR_RESULT:
            raise ClientStateError('terminate() called while state='+self.strstate)
if self.state == ClientState.TERMINATING:
            raise ClientStateError('terminate() called while state='+self.strstate)
elif self.state in ClientState.TerminatedSet:
assert not self._server_process.is_alive()
return
elif self.state == ClientState.READY:
# Check that the process itself is still alive
self._assert_alive()
# Make sure the SIGCHLD signal handler doesn't throw any exceptions
self.state = ClientState.TERMINATING
# Do not call execute() because that function will check
# whether the process is alive and throw an exception if not
# TODO: can the queue itself throw exceptions?
self._delegate_channel.put(FunctionCallDelegate(_raise_terminate))
# Wait for acknowledgement
try:
self._read_result(num_retries=5)
except ProcessTerminationError as ex:
pass
except ChannelError as ex:
# Was interrupted five times in a row! Ignore for now
logger.debug('client failed to read sentinel from channel after 5 retries - will terminate anyway')
self.state = ClientState.TERMINATED_CLEANLY | Stop the server process and change our state to TERMINATING. Only valid if state=READY. | Below is the the instruction that describes the task:
### Input:
Stop the server process and change our state to TERMINATING. Only valid if state=READY.
### Response:
def terminate(self):
'''Stop the server process and change our state to TERMINATING. Only valid if state=READY.'''
logger.debug('client.terminate() called (state=%s)', self.strstate)
if self.state == ClientState.WAITING_FOR_RESULT:
            raise ClientStateError('terminate() called while state='+self.strstate)
if self.state == ClientState.TERMINATING:
            raise ClientStateError('terminate() called while state='+self.strstate)
elif self.state in ClientState.TerminatedSet:
assert not self._server_process.is_alive()
return
elif self.state == ClientState.READY:
# Check that the process itself is still alive
self._assert_alive()
# Make sure the SIGCHLD signal handler doesn't throw any exceptions
self.state = ClientState.TERMINATING
# Do not call execute() because that function will check
# whether the process is alive and throw an exception if not
# TODO: can the queue itself throw exceptions?
self._delegate_channel.put(FunctionCallDelegate(_raise_terminate))
# Wait for acknowledgement
try:
self._read_result(num_retries=5)
except ProcessTerminationError as ex:
pass
except ChannelError as ex:
# Was interrupted five times in a row! Ignore for now
logger.debug('client failed to read sentinel from channel after 5 retries - will terminate anyway')
self.state = ClientState.TERMINATED_CLEANLY |
def _read_body_by_length(self, response, file):
'''Read the connection specified by a length.
Coroutine.
'''
_logger.debug('Reading body by length.')
file_is_async = hasattr(file, 'drain')
try:
body_size = int(response.fields['Content-Length'])
if body_size < 0:
raise ValueError('Content length cannot be negative.')
except ValueError as error:
_logger.warning(__(
_('Invalid content length: {error}'), error=error
))
yield from self._read_body_until_close(response, file)
return
bytes_left = body_size
while bytes_left > 0:
data = yield from self._connection.read(self._read_size)
if not data:
break
bytes_left -= len(data)
if bytes_left < 0:
data = data[:bytes_left]
_logger.warning(_('Content overrun.'))
self.close()
self._data_event_dispatcher.notify_read(data)
content_data = self._decompress_data(data)
if file:
file.write(content_data)
if file_is_async:
yield from file.drain()
if bytes_left > 0:
raise NetworkError('Connection closed.')
content_data = self._flush_decompressor()
if file and content_data:
file.write(content_data)
if file_is_async:
yield from file.drain() | Read the connection specified by a length.
Coroutine. | Below is the the instruction that describes the task:
### Input:
Read the connection specified by a length.
Coroutine.
### Response:
def _read_body_by_length(self, response, file):
'''Read the connection specified by a length.
Coroutine.
'''
_logger.debug('Reading body by length.')
file_is_async = hasattr(file, 'drain')
try:
body_size = int(response.fields['Content-Length'])
if body_size < 0:
raise ValueError('Content length cannot be negative.')
except ValueError as error:
_logger.warning(__(
_('Invalid content length: {error}'), error=error
))
yield from self._read_body_until_close(response, file)
return
bytes_left = body_size
while bytes_left > 0:
data = yield from self._connection.read(self._read_size)
if not data:
break
bytes_left -= len(data)
if bytes_left < 0:
data = data[:bytes_left]
_logger.warning(_('Content overrun.'))
self.close()
self._data_event_dispatcher.notify_read(data)
content_data = self._decompress_data(data)
if file:
file.write(content_data)
if file_is_async:
yield from file.drain()
if bytes_left > 0:
raise NetworkError('Connection closed.')
content_data = self._flush_decompressor()
if file and content_data:
file.write(content_data)
if file_is_async:
yield from file.drain() |
def keyPressEvent(self, event):
"""Reimplement Qt Method - Basic keypress event handler"""
event, text, key, ctrl, shift = restore_keyevent(event)
if key == Qt.Key_Slash and self.isVisible():
self.show_find_widget.emit() | Reimplement Qt Method - Basic keypress event handler | Below is the the instruction that describes the task:
### Input:
Reimplement Qt Method - Basic keypress event handler
### Response:
def keyPressEvent(self, event):
"""Reimplement Qt Method - Basic keypress event handler"""
event, text, key, ctrl, shift = restore_keyevent(event)
if key == Qt.Key_Slash and self.isVisible():
self.show_find_widget.emit() |
def rhymes(word):
"""Get words rhyming with a given word.
This function may return an empty list if no rhyming words are found in
the dictionary, or if the word you pass to the function is itself not
found in the dictionary.
.. doctest::
>>> import pronouncing
>>> pronouncing.rhymes("conditioner")
['commissioner', 'parishioner', 'petitioner', 'practitioner']
:param word: a word
:returns: a list of rhyming words
"""
phones = phones_for_word(word)
combined_rhymes = []
if phones:
for element in phones:
combined_rhymes.append([w for w in rhyme_lookup.get(rhyming_part(
element), []) if w != word])
combined_rhymes = list(chain.from_iterable(combined_rhymes))
unique_combined_rhymes = sorted(set(combined_rhymes))
return unique_combined_rhymes
else:
return [] | Get words rhyming with a given word.
This function may return an empty list if no rhyming words are found in
the dictionary, or if the word you pass to the function is itself not
found in the dictionary.
.. doctest::
>>> import pronouncing
>>> pronouncing.rhymes("conditioner")
['commissioner', 'parishioner', 'petitioner', 'practitioner']
:param word: a word
:returns: a list of rhyming words | Below is the the instruction that describes the task:
### Input:
Get words rhyming with a given word.
This function may return an empty list if no rhyming words are found in
the dictionary, or if the word you pass to the function is itself not
found in the dictionary.
.. doctest::
>>> import pronouncing
>>> pronouncing.rhymes("conditioner")
['commissioner', 'parishioner', 'petitioner', 'practitioner']
:param word: a word
:returns: a list of rhyming words
### Response:
def rhymes(word):
"""Get words rhyming with a given word.
This function may return an empty list if no rhyming words are found in
the dictionary, or if the word you pass to the function is itself not
found in the dictionary.
.. doctest::
>>> import pronouncing
>>> pronouncing.rhymes("conditioner")
['commissioner', 'parishioner', 'petitioner', 'practitioner']
:param word: a word
:returns: a list of rhyming words
"""
phones = phones_for_word(word)
combined_rhymes = []
if phones:
for element in phones:
combined_rhymes.append([w for w in rhyme_lookup.get(rhyming_part(
element), []) if w != word])
combined_rhymes = list(chain.from_iterable(combined_rhymes))
unique_combined_rhymes = sorted(set(combined_rhymes))
return unique_combined_rhymes
else:
return [] |
def _get_ancestors_of(self, obs_nodes_list):
"""
Returns a list of all ancestors of all the observed nodes.
Parameters
----------
obs_nodes_list: string, list-type
name of all the observed nodes
"""
if not obs_nodes_list:
return set()
return set(obs_nodes_list) | set(self.parent_node) | Returns a list of all ancestors of all the observed nodes.
Parameters
----------
obs_nodes_list: string, list-type
name of all the observed nodes | Below is the the instruction that describes the task:
### Input:
Returns a list of all ancestors of all the observed nodes.
Parameters
----------
obs_nodes_list: string, list-type
name of all the observed nodes
### Response:
def _get_ancestors_of(self, obs_nodes_list):
"""
Returns a list of all ancestors of all the observed nodes.
Parameters
----------
obs_nodes_list: string, list-type
name of all the observed nodes
"""
if not obs_nodes_list:
return set()
return set(obs_nodes_list) | set(self.parent_node) |
def GlobForPaths(self,
paths,
pathtype="OS",
root_path=None,
process_non_regular_files=False,
collect_ext_attrs=False):
"""Starts the Glob.
This is the main entry point for this flow mixin.
First we convert the pattern into regex components, and then we
interpolate each component. Finally, we generate a cartesian product of all
combinations.
Args:
paths: A list of GlobExpression instances.
pathtype: The pathtype to use for creating pathspecs.
root_path: A pathspec where to start searching from.
process_non_regular_files: Work with all kinds of files - not only with
regular ones.
collect_ext_attrs: Whether to gather information about file extended
attributes.
"""
patterns = []
if not paths:
# Nothing to do.
return
self.state.pathtype = pathtype
self.state.root_path = root_path
self.state.process_non_regular_files = process_non_regular_files
self.state.collect_ext_attrs = collect_ext_attrs
# Transform the patterns by substitution of client attributes. When the
# client has multiple values for an attribute, this generates multiple
# copies of the pattern, one for each variation. e.g.:
# /home/%%Usernames%%/* -> [ /home/user1/*, /home/user2/* ]
for path in paths:
patterns.extend(
path.Interpolate(knowledge_base=self.client_knowledge_base))
# Sort the patterns so that if there are files whose paths conflict with
# directory paths, the files get handled after the conflicting directories
# have been added to the component tree.
patterns.sort(key=len, reverse=True)
# Expand each glob pattern into a list of components. A component is either
# a wildcard or a literal component.
# e.g. /usr/lib/*.exe -> ['/usr/lib', '.*.exe']
# We build a tree for each component such that duplicated components are
# merged. We do not need to reissue the same client requests for the same
# components. For example, the patterns:
# '/home/%%Usernames%%*' -> {'/home/': {
# 'syslog.*\\Z(?ms)': {}, 'test.*\\Z(?ms)': {}}}
# Note: The component tree contains serialized pathspecs in dicts.
for pattern in patterns:
# The root node.
curr_node = self.state.component_tree
components = self.ConvertGlobIntoPathComponents(pattern)
for i, curr_component in enumerate(components):
is_last_component = i == len(components) - 1
next_node = curr_node.get(curr_component.SerializeToString(), {})
if is_last_component and next_node:
# There is a conflicting directory already existing in the tree.
# Replace the directory node with a node representing this file.
curr_node[curr_component.SerializeToString()] = {}
else:
curr_node = curr_node.setdefault(curr_component.SerializeToString(),
{})
root_path = next(iterkeys(self.state.component_tree))
self.CallStateInline(
messages=[None],
next_state="ProcessEntry",
request_data=dict(component_path=[root_path])) | Starts the Glob.
This is the main entry point for this flow mixin.
First we convert the pattern into regex components, and then we
interpolate each component. Finally, we generate a cartesian product of all
combinations.
Args:
paths: A list of GlobExpression instances.
pathtype: The pathtype to use for creating pathspecs.
root_path: A pathspec where to start searching from.
process_non_regular_files: Work with all kinds of files - not only with
regular ones.
collect_ext_attrs: Whether to gather information about file extended
attributes. | Below is the the instruction that describes the task:
### Input:
Starts the Glob.
This is the main entry point for this flow mixin.
First we convert the pattern into regex components, and then we
interpolate each component. Finally, we generate a cartesian product of all
combinations.
Args:
paths: A list of GlobExpression instances.
pathtype: The pathtype to use for creating pathspecs.
root_path: A pathspec where to start searching from.
process_non_regular_files: Work with all kinds of files - not only with
regular ones.
collect_ext_attrs: Whether to gather information about file extended
attributes.
### Response:
def GlobForPaths(self,
paths,
pathtype="OS",
root_path=None,
process_non_regular_files=False,
collect_ext_attrs=False):
"""Starts the Glob.
This is the main entry point for this flow mixin.
First we convert the pattern into regex components, and then we
interpolate each component. Finally, we generate a cartesian product of all
combinations.
Args:
paths: A list of GlobExpression instances.
pathtype: The pathtype to use for creating pathspecs.
root_path: A pathspec where to start searching from.
process_non_regular_files: Work with all kinds of files - not only with
regular ones.
collect_ext_attrs: Whether to gather information about file extended
attributes.
"""
patterns = []
if not paths:
# Nothing to do.
return
self.state.pathtype = pathtype
self.state.root_path = root_path
self.state.process_non_regular_files = process_non_regular_files
self.state.collect_ext_attrs = collect_ext_attrs
# Transform the patterns by substitution of client attributes. When the
# client has multiple values for an attribute, this generates multiple
# copies of the pattern, one for each variation. e.g.:
# /home/%%Usernames%%/* -> [ /home/user1/*, /home/user2/* ]
for path in paths:
patterns.extend(
path.Interpolate(knowledge_base=self.client_knowledge_base))
# Sort the patterns so that if there are files whose paths conflict with
# directory paths, the files get handled after the conflicting directories
# have been added to the component tree.
patterns.sort(key=len, reverse=True)
# Expand each glob pattern into a list of components. A component is either
# a wildcard or a literal component.
# e.g. /usr/lib/*.exe -> ['/usr/lib', '.*.exe']
# We build a tree for each component such that duplicated components are
# merged. We do not need to reissue the same client requests for the same
# components. For example, the patterns:
# '/home/%%Usernames%%*' -> {'/home/': {
# 'syslog.*\\Z(?ms)': {}, 'test.*\\Z(?ms)': {}}}
# Note: The component tree contains serialized pathspecs in dicts.
for pattern in patterns:
# The root node.
curr_node = self.state.component_tree
components = self.ConvertGlobIntoPathComponents(pattern)
for i, curr_component in enumerate(components):
is_last_component = i == len(components) - 1
next_node = curr_node.get(curr_component.SerializeToString(), {})
if is_last_component and next_node:
# There is a conflicting directory already existing in the tree.
# Replace the directory node with a node representing this file.
curr_node[curr_component.SerializeToString()] = {}
else:
curr_node = curr_node.setdefault(curr_component.SerializeToString(),
{})
root_path = next(iterkeys(self.state.component_tree))
self.CallStateInline(
messages=[None],
next_state="ProcessEntry",
request_data=dict(component_path=[root_path])) |
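For orientation, a sketch of how a flow that mixes in the class above might invoke it; the GlobExpression constructor, its module path, and the %%users.homedir%% pattern are assumptions drawn from typical GRR-style usage, not from this row.

# Inside a flow method; 'self' is the flow object that mixes in GlobForPaths.
paths = [rdf_paths.GlobExpression("%%users.homedir%%/.ssh/authorized_keys"),
         rdf_paths.GlobExpression("/var/log/*.log")]
self.GlobForPaths(paths,
                  pathtype="OS",
                  process_non_regular_files=False,
                  collect_ext_attrs=False)
# Matching entries are then delivered to the flow's "ProcessEntry" state.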
def is_allowed(func):
"""Check user password, when is correct, then run decorated function.
:returns: decorated function
"""
@wraps(func)
def _is_allowed(user, *args, **kwargs):
password = kwargs.pop('password', None)
if user.check_password(password):
return func(user, *args, **kwargs)
else:
raise NotAllowedError()
# add password parameter to function signature
sig = inspect.signature(func)
parms = list(sig.parameters.values())
parms.append(inspect.Parameter('password',
inspect.Parameter.KEYWORD_ONLY,
default=None))
_is_allowed.__signature__ = sig.replace(parameters=parms)
return _is_allowed | Check user password, when is correct, then run decorated function.
:returns: decorated function | Below is the the instruction that describes the task:
### Input:
Check user password, when is correct, then run decorated function.
:returns: decorated function
### Response:
def is_allowed(func):
"""Check user password, when is correct, then run decorated function.
:returns: decorated function
"""
@wraps(func)
def _is_allowed(user, *args, **kwargs):
password = kwargs.pop('password', None)
if user.check_password(password):
return func(user, *args, **kwargs)
else:
raise NotAllowedError()
# add password parameter to function signature
sig = inspect.signature(func)
parms = list(sig.parameters.values())
parms.append(inspect.Parameter('password',
inspect.Parameter.KEYWORD_ONLY,
default=None))
_is_allowed.__signature__ = sig.replace(parameters=parms)
return _is_allowed |
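A minimal usage sketch for the decorator above; the User class, its stored password, and the NotAllowedError stub are invented for illustration, and only is_allowed itself comes from the snippet.

class NotAllowedError(Exception):            # stand-in for the project's own exception
    pass

class User:
    def __init__(self, password):
        self._password = password

    def check_password(self, password):       # hook the decorator relies on
        return password == self._password

    @is_allowed
    def close_account(self):
        return "closed"

user = User("s3cret")
user.close_account(password="s3cret")         # -> "closed"
# user.close_account(password="wrong")        # would raise NotAllowedError
# inspect.signature(user.close_account) now shows the keyword-only 'password'
# parameter that the decorator appends to the visible signature.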
def get_object(self, path):
"""Get single object."""
obj = self.native_container.get_object(path)
return self.obj_cls.from_obj(self, obj) | Get single object. | Below is the the instruction that describes the task:
### Input:
Get single object.
### Response:
def get_object(self, path):
"""Get single object."""
obj = self.native_container.get_object(path)
return self.obj_cls.from_obj(self, obj) |
def getpass(self, prompt, default=None):
"""Provide a password prompt."""
return click.prompt(prompt, hide_input=True, default=default) | Provide a password prompt. | Below is the the instruction that describes the task:
### Input:
Provide a password prompt.
### Response:
def getpass(self, prompt, default=None):
"""Provide a password prompt."""
return click.prompt(prompt, hide_input=True, default=default) |
def fetch_pool(repo_url, branch='master', reuse_existing=False):
"""Fetch a git repository from ``repo_url`` and returns a ``FeaturePool`` object."""
repo_name = get_repo_name(repo_url)
lib_dir = get_lib_dir()
pool_dir = get_pool_dir(repo_name)
print('... fetching %s ' % repo_name)
if os.path.exists(pool_dir):
if not reuse_existing:
raise Exception('ERROR: repository already exists')
else:
try:
a = call(['git', 'clone', repo_url], cwd=lib_dir)
except OSError:
raise Exception('ERROR: You probably dont have git installed: sudo apt-get install git')
if a != 0:
raise Exception('ERROR: check your repository url and credentials!')
try:
call(['git', 'checkout', branch], cwd=pool_dir)
except OSError:
raise Exception('ERROR: cannot switch branches')
print('... repository successfully cloned')
return FeaturePool(pool_dir) | Fetch a git repository from ``repo_url`` and returns a ``FeaturePool`` object. | Below is the the instruction that describes the task:
### Input:
Fetch a git repository from ``repo_url`` and returns a ``FeaturePool`` object.
### Response:
def fetch_pool(repo_url, branch='master', reuse_existing=False):
"""Fetch a git repository from ``repo_url`` and returns a ``FeaturePool`` object."""
repo_name = get_repo_name(repo_url)
lib_dir = get_lib_dir()
pool_dir = get_pool_dir(repo_name)
print('... fetching %s ' % repo_name)
if os.path.exists(pool_dir):
if not reuse_existing:
raise Exception('ERROR: repository already exists')
else:
try:
a = call(['git', 'clone', repo_url], cwd=lib_dir)
except OSError:
raise Exception('ERROR: You probably dont have git installed: sudo apt-get install git')
if a != 0:
raise Exception('ERROR: check your repository url and credentials!')
try:
call(['git', 'checkout', branch], cwd=pool_dir)
except OSError:
raise Exception('ERROR: cannot switch branches')
print('... repository successfully cloned')
return FeaturePool(pool_dir) |
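Typical use is a single call; the repository URL below is hypothetical, and git must be installed and able to reach it.

pool = fetch_pool("https://github.com/example/feature-pool.git",
                  branch="master",
                  reuse_existing=True)        # returns a FeaturePool over the clone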
def get_numpy_include_path():
"""
Gets the path to the numpy headers.
"""
# We need to go through this nonsense in case setuptools
# downloaded and installed Numpy for us as part of the build or
# install, since Numpy may still think it's in "setup mode", when
# in fact we're ready to use it to build astropy now.
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
return numpy_include | Gets the path to the numpy headers. | Below is the the instruction that describes the task:
### Input:
Gets the path to the numpy headers.
### Response:
def get_numpy_include_path():
"""
Gets the path to the numpy headers.
"""
# We need to go through this nonsense in case setuptools
# downloaded and installed Numpy for us as part of the build or
# install, since Numpy may still think it's in "setup mode", when
# in fact we're ready to use it to build astropy now.
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
return numpy_include |
def history(location, model, filename, deployment, custom_config):
"""Generate a report over a model's git commit history."""
callbacks.git_installed()
LOGGER.info("Initialising history report generation.")
if location is None:
raise click.BadParameter("No 'location' given or configured.")
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
LOGGER.critical(
"The history report requires a git repository in order to check "
"the model's commit history.")
sys.exit(1)
LOGGER.info("Obtaining history of results from "
"the deployment branch {}.".format(deployment))
repo.git.checkout(deployment)
try:
manager = managers.SQLResultManager(repository=repo, location=location)
except (AttributeError, ArgumentError):
manager = managers.RepoResultManager(
repository=repo, location=location)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
config.merge(ReportConfiguration.load(custom))
LOGGER.info("Tracing the commit history.")
history = managers.HistoryManager(repository=repo, manager=manager)
history.load_history(model, skip={deployment})
LOGGER.info("Composing the history report.")
report = api.history_report(history, config=config)
with open(filename, "w", encoding="utf-8") as file_handle:
file_handle.write(report) | Generate a report over a model's git commit history. | Below is the the instruction that describes the task:
### Input:
Generate a report over a model's git commit history.
### Response:
def history(location, model, filename, deployment, custom_config):
"""Generate a report over a model's git commit history."""
callbacks.git_installed()
LOGGER.info("Initialising history report generation.")
if location is None:
raise click.BadParameter("No 'location' given or configured.")
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
LOGGER.critical(
"The history report requires a git repository in order to check "
"the model's commit history.")
sys.exit(1)
LOGGER.info("Obtaining history of results from "
"the deployment branch {}.".format(deployment))
repo.git.checkout(deployment)
try:
manager = managers.SQLResultManager(repository=repo, location=location)
except (AttributeError, ArgumentError):
manager = managers.RepoResultManager(
repository=repo, location=location)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
config.merge(ReportConfiguration.load(custom))
LOGGER.info("Tracing the commit history.")
history = managers.HistoryManager(repository=repo, manager=manager)
history.load_history(model, skip={deployment})
LOGGER.info("Composing the history report.")
report = api.history_report(history, config=config)
with open(filename, "w", encoding="utf-8") as file_handle:
file_handle.write(report) |
def mqtt_connected(func):
"""
MQTTClient coroutines decorator which will wait until connection before calling the decorated method.
:param func: coroutine to be called once connected
:return: coroutine result
"""
@asyncio.coroutine
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self._connected_state.is_set():
base_logger.warning("Client not connected, waiting for it")
_, pending = yield from asyncio.wait([self._connected_state.wait(), self._no_more_connections.wait()], return_when=asyncio.FIRST_COMPLETED)
for t in pending:
t.cancel()
if self._no_more_connections.is_set():
raise ClientException("Will not reconnect")
return (yield from func(self, *args, **kwargs))
return wrapper | MQTTClient coroutines decorator which will wait until connection before calling the decorated method.
:param func: coroutine to be called once connected
:return: coroutine result | Below is the the instruction that describes the task:
### Input:
MQTTClient coroutines decorator which will wait until connection before calling the decorated method.
:param func: coroutine to be called once connected
:return: coroutine result
### Response:
def mqtt_connected(func):
"""
MQTTClient coroutines decorator which will wait until connection before calling the decorated method.
:param func: coroutine to be called once connected
:return: coroutine result
"""
@asyncio.coroutine
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self._connected_state.is_set():
base_logger.warning("Client not connected, waiting for it")
_, pending = yield from asyncio.wait([self._connected_state.wait(), self._no_more_connections.wait()], return_when=asyncio.FIRST_COMPLETED)
for t in pending:
t.cancel()
if self._no_more_connections.is_set():
raise ClientException("Will not reconnect")
return (yield from func(self, *args, **kwargs))
return wrapper |
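A sketch of where the decorator above sits; the toy client is invented, but the _connected_state and _no_more_connections event names mirror the snippet, and the generator-based coroutine style matches the older asyncio versions it targets.

import asyncio

class ToyClient:
    def __init__(self):
        self._connected_state = asyncio.Event()       # set once the broker accepts us
        self._no_more_connections = asyncio.Event()   # set when reconnection is abandoned

    @mqtt_connected
    @asyncio.coroutine
    def publish(self, topic, payload):
        return (topic, payload)

client = ToyClient()
client._connected_state.set()                  # pretend we are already connected
loop = asyncio.get_event_loop()
loop.run_until_complete(client.publish("sensors/temp", b"21.5"))  # ('sensors/temp', b'21.5')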
def hs_demux(sel, hsi, ls_hso):
""" [One-to-many] Demultiplexes to a list of output handshake interfaces
sel - (i) selects an output handshake interface to connect to the input
hsi - (i) input handshake tuple (ready, valid)
ls_hso - (o) list of output handshake tuples (ready, valid)
"""
N = len(ls_hso)
hsi_rdy, hsi_vld = hsi
ls_hso_rdy, ls_hso_vld = zip(*ls_hso)
ls_hso_rdy, ls_hso_vld = list(ls_hso_rdy), list(ls_hso_vld)
@always_comb
def _hsdemux():
hsi_rdy.next = 0
for i in range(N):
ls_hso_vld[i].next = 0
if i == sel:
hsi_rdy.next = ls_hso_rdy[i]
ls_hso_vld[i].next = hsi_vld
return _hsdemux | [One-to-many] Demultiplexes to a list of output handshake interfaces
sel - (i) selects an output handshake interface to connect to the input
hsi - (i) input handshake tuple (ready, valid)
ls_hso - (o) list of output handshake tuples (ready, valid) | Below is the the instruction that describes the task:
### Input:
[One-to-many] Demultiplexes to a list of output handshake interfaces
sel - (i) selects an output handshake interface to connect to the input
hsi - (i) input handshake tuple (ready, valid)
ls_hso - (o) list of output handshake tuples (ready, valid)
### Response:
def hs_demux(sel, hsi, ls_hso):
""" [One-to-many] Demultiplexes to a list of output handshake interfaces
sel - (i) selects an output handshake interface to connect to the input
hsi - (i) input handshake tuple (ready, valid)
ls_hso - (o) list of output handshake tuples (ready, valid)
"""
N = len(ls_hso)
hsi_rdy, hsi_vld = hsi
ls_hso_rdy, ls_hso_vld = zip(*ls_hso)
ls_hso_rdy, ls_hso_vld = list(ls_hso_rdy), list(ls_hso_vld)
@always_comb
def _hsdemux():
hsi_rdy.next = 0
for i in range(N):
ls_hso_vld[i].next = 0
if i == sel:
hsi_rdy.next = ls_hso_rdy[i]
ls_hso_vld[i].next = hsi_vld
return _hsdemux |
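Assuming the MyHDL library, which the Signal/always_comb style points to, one input handshake can be fanned out to four outputs as follows; every name below is illustrative.

from myhdl import Signal, intbv

N = 4
sel = Signal(intbv(0, min=0, max=N))                    # selects the active output
hsi = (Signal(bool(0)), Signal(bool(0)))                # input (ready, valid)
ls_hso = [(Signal(bool(0)), Signal(bool(0))) for _ in range(N)]  # output interfaces

demux_inst = hs_demux(sel, hsi, ls_hso)                 # the returned @always_comb block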
def get_relationship_targets(item_ids, relationships, id2rec):
"""Get item ID set of item IDs in a relationship target set."""
# Requirements to use this function:
# 1) item Terms must have been loaded with 'relationships'
# 2) item IDs in 'item_ids' arguement must be present in id2rec
# 3) Arg, 'relationships' must be True or an iterable
reltgt_objs_all = set()
for goid in item_ids:
obj = id2rec[goid]
for reltype, reltgt_objs_cur in obj.relationship.items():
if relationships is True or reltype in relationships:
reltgt_objs_all.update(reltgt_objs_cur)
return reltgt_objs_all | Get item ID set of item IDs in a relationship target set. | Below is the the instruction that describes the task:
### Input:
Get item ID set of item IDs in a relationship target set.
### Response:
def get_relationship_targets(item_ids, relationships, id2rec):
"""Get item ID set of item IDs in a relationship target set."""
# Requirements to use this function:
# 1) item Terms must have been loaded with 'relationships'
# 2) item IDs in 'item_ids' arguement must be present in id2rec
# 3) Arg, 'relationships' must be True or an iterable
reltgt_objs_all = set()
for goid in item_ids:
obj = id2rec[goid]
for reltype, reltgt_objs_cur in obj.relationship.items():
if relationships is True or reltype in relationships:
reltgt_objs_all.update(reltgt_objs_cur)
return reltgt_objs_all |
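A toy call showing the record shape the function expects (a .relationship dict mapping relation type to a set of targets); the namedtuple and GO IDs are made up.

from collections import namedtuple

Rec = namedtuple("Rec", "relationship")
id2rec = {
    "GO:1": Rec({"part_of": {"GO:2"}, "regulates": {"GO:3"}}),
    "GO:4": Rec({"part_of": {"GO:5"}}),
}
get_relationship_targets(["GO:1", "GO:4"], {"part_of"}, id2rec)
# -> {'GO:2', 'GO:5'}  (only the 'part_of' targets are collected)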
def simplify_script(self) -> 'Language':
"""
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
"""
if self._simplified is not None:
return self._simplified
if self.language and self.script:
if DEFAULT_SCRIPTS.get(self.language) == self.script:
result = self.update_dict({'script': None})
self._simplified = result
return self._simplified
self._simplified = self
return self._simplified | Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi') | Below is the the instruction that describes the task:
### Input:
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
### Response:
def simplify_script(self) -> 'Language':
"""
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
"""
if self._simplified is not None:
return self._simplified
if self.language and self.script:
if DEFAULT_SCRIPTS.get(self.language) == self.script:
result = self.update_dict({'script': None})
self._simplified = result
return self._simplified
self._simplified = self
return self._simplified |
def _missing_datetimes(self, finite_datetimes):
"""
Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
"""
try:
return self.missing_datetimes(finite_datetimes)
except TypeError as ex:
if 'missing_datetimes()' in repr(ex):
warnings.warn('In your Range* subclass, missing_datetimes() should only take 1 argument (see latest docs)')
return self.missing_datetimes(self.of_cls, finite_datetimes)
else:
raise | Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015) | Below is the the instruction that describes the task:
### Input:
Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
### Response:
def _missing_datetimes(self, finite_datetimes):
"""
Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
"""
try:
return self.missing_datetimes(finite_datetimes)
except TypeError as ex:
if 'missing_datetimes()' in repr(ex):
warnings.warn('In your Range* subclass, missing_datetimes() should only take 1 argument (see latest docs)')
return self.missing_datetimes(self.of_cls, finite_datetimes)
else:
raise |
def hotkey(*args, **kwargs):
"""Performs key down presses on the arguments passed in order, then performs
key releases in reverse order.
The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
"Ctrl-Shift-C" hotkey/keyboard shortcut press.
Args:
key(s) (str): The series of keys to press, in order. This can also be a
list of key strings to press.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(kwargs.get('interval', 0.0))
_failSafeCheck()
for c in args:
if len(c) > 1:
c = c.lower()
platformModule._keyDown(c)
time.sleep(interval)
for c in reversed(args):
if len(c) > 1:
c = c.lower()
platformModule._keyUp(c)
time.sleep(interval)
_autoPause(kwargs.get('pause', None), kwargs.get('_pause', True)) | Performs key down presses on the arguments passed in order, then performs
key releases in reverse order.
The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
"Ctrl-Shift-C" hotkey/keyboard shortcut press.
Args:
key(s) (str): The series of keys to press, in order. This can also be a
list of key strings to press.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Performs key down presses on the arguments passed in order, then performs
key releases in reverse order.
The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
"Ctrl-Shift-C" hotkey/keyboard shortcut press.
Args:
key(s) (str): The series of keys to press, in order. This can also be a
list of key strings to press.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
### Response:
def hotkey(*args, **kwargs):
"""Performs key down presses on the arguments passed in order, then performs
key releases in reverse order.
The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
"Ctrl-Shift-C" hotkey/keyboard shortcut press.
Args:
key(s) (str): The series of keys to press, in order. This can also be a
list of key strings to press.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(kwargs.get('interval', 0.0))
_failSafeCheck()
for c in args:
if len(c) > 1:
c = c.lower()
platformModule._keyDown(c)
time.sleep(interval)
for c in reversed(args):
if len(c) > 1:
c = c.lower()
platformModule._keyUp(c)
time.sleep(interval)
_autoPause(kwargs.get('pause', None), kwargs.get('_pause', True)) |
def ok(self):
"""
Returns True if OK to use, else False
"""
try:
v = int(self._value)
if v < 0:
return False
else:
return True
except:
return False | Returns True if OK to use, else False | Below is the the instruction that describes the task:
### Input:
Returns True if OK to use, else False
### Response:
def ok(self):
"""
Returns True if OK to use, else False
"""
try:
v = int(self._value)
if v < 0:
return False
else:
return True
except:
return False |
def special_case_mysql(self, u, kwargs):
"""For mysql, take max_idle out of the query arguments, and
use its value for pool_recycle. Also, force use_unicode and
charset to be True and 'utf8', failing if they were set to
anything else."""
kwargs['pool_recycle'] = int(u.query.pop('max_idle', 3600))
# default to the MyISAM storage engine
storage_engine = u.query.pop('storage_engine', 'MyISAM')
kwargs['connect_args'] = {
'init_command': 'SET default_storage_engine=%s' % storage_engine,
}
if 'use_unicode' in u.query:
if u.query['use_unicode'] != "True":
raise TypeError("Buildbot requires use_unicode=True " +
"(and adds it automatically)")
else:
u.query['use_unicode'] = True
if 'charset' in u.query:
if u.query['charset'] != "utf8":
raise TypeError("Buildbot requires charset=utf8 " +
"(and adds it automatically)")
else:
u.query['charset'] = 'utf8'
return u, kwargs, None | For mysql, take max_idle out of the query arguments, and
use its value for pool_recycle. Also, force use_unicode and
charset to be True and 'utf8', failing if they were set to
anything else. | Below is the the instruction that describes the task:
### Input:
For mysql, take max_idle out of the query arguments, and
use its value for pool_recycle. Also, force use_unicode and
charset to be True and 'utf8', failing if they were set to
anything else.
### Response:
def special_case_mysql(self, u, kwargs):
"""For mysql, take max_idle out of the query arguments, and
use its value for pool_recycle. Also, force use_unicode and
charset to be True and 'utf8', failing if they were set to
anything else."""
kwargs['pool_recycle'] = int(u.query.pop('max_idle', 3600))
# default to the MyISAM storage engine
storage_engine = u.query.pop('storage_engine', 'MyISAM')
kwargs['connect_args'] = {
'init_command': 'SET default_storage_engine=%s' % storage_engine,
}
if 'use_unicode' in u.query:
if u.query['use_unicode'] != "True":
raise TypeError("Buildbot requires use_unicode=True " +
"(and adds it automatically)")
else:
u.query['use_unicode'] = True
if 'charset' in u.query:
if u.query['charset'] != "utf8":
raise TypeError("Buildbot requires charset=utf8 " +
"(and adds it automatically)")
else:
u.query['charset'] = 'utf8'
return u, kwargs, None |
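To see the transformation, a mutable stand-in for the parsed SQLAlchemy URL is enough; SimpleNamespace imitates the .query dict the method pops from, and None is passed for the unused self.

from types import SimpleNamespace

u = SimpleNamespace(query={"max_idle": "300", "storage_engine": "InnoDB"})
kwargs = {}
u, kwargs, _ = special_case_mysql(None, u, kwargs)

kwargs["pool_recycle"]    # 300
kwargs["connect_args"]    # {'init_command': 'SET default_storage_engine=InnoDB'}
u.query                   # {'use_unicode': True, 'charset': 'utf8'}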
def replicate_vm_image(self, vm_image_name, regions, offer, sku, version):
'''
Replicate a VM image to multiple target locations. This operation
is only for publishers. You have to be registered as image publisher
with Microsoft Azure to be able to call this.
vm_image_name:
Specifies the name of the VM Image that is to be used for
replication
regions:
Specified a list of regions to replicate the image to
Note: The regions in the request body are not additive. If a VM
Image has already been replicated to Regions A, B, and C, and
a request is made to replicate to Regions A and D, the VM
Image will remain in Region A, will be replicated in Region D,
and will be unreplicated from Regions B and C
offer:
Specifies the publisher defined name of the offer. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.).The maximum allowed length is 64 characters.
sku:
Specifies the publisher defined name of the Sku. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.). The maximum allowed length is 64 characters.
version:
Specifies the publisher defined version of the image.
The allowed characters are digit and period.
Format: <MajorVersion>.<MinorVersion>.<Patch>
Example: '1.0.0' or '1.1.0' The 3 version number to
follow standard of most of the RPs. See http://semver.org
'''
_validate_not_none('vm_image_name', vm_image_name)
_validate_not_none('regions', regions)
_validate_not_none('offer', offer)
_validate_not_none('sku', sku)
_validate_not_none('version', version)
return self._perform_put(
self._get_replication_path_using_vm_image_name(vm_image_name),
_XmlSerializer.replicate_image_to_xml(
regions,
offer,
sku,
version
),
as_async=True,
x_ms_version='2015-04-01'
) | Replicate a VM image to multiple target locations. This operation
is only for publishers. You have to be registered as image publisher
with Microsoft Azure to be able to call this.
vm_image_name:
Specifies the name of the VM Image that is to be used for
replication
regions:
Specified a list of regions to replicate the image to
Note: The regions in the request body are not additive. If a VM
Image has already been replicated to Regions A, B, and C, and
a request is made to replicate to Regions A and D, the VM
Image will remain in Region A, will be replicated in Region D,
and will be unreplicated from Regions B and C
offer:
Specifies the publisher defined name of the offer. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.).The maximum allowed length is 64 characters.
sku:
Specifies the publisher defined name of the Sku. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.). The maximum allowed length is 64 characters.
version:
Specifies the publisher defined version of the image.
The allowed characters are digit and period.
Format: <MajorVersion>.<MinorVersion>.<Patch>
Example: '1.0.0' or '1.1.0' The 3 version number to
follow standard of most of the RPs. See http://semver.org | Below is the the instruction that describes the task:
### Input:
Replicate a VM image to multiple target locations. This operation
is only for publishers. You have to be registered as image publisher
with Microsoft Azure to be able to call this.
vm_image_name:
Specifies the name of the VM Image that is to be used for
replication
regions:
Specified a list of regions to replicate the image to
Note: The regions in the request body are not additive. If a VM
Image has already been replicated to Regions A, B, and C, and
a request is made to replicate to Regions A and D, the VM
Image will remain in Region A, will be replicated in Region D,
and will be unreplicated from Regions B and C
offer:
Specifies the publisher defined name of the offer. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.).The maximum allowed length is 64 characters.
sku:
Specifies the publisher defined name of the Sku. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.). The maximum allowed length is 64 characters.
version:
Specifies the publisher defined version of the image.
The allowed characters are digit and period.
Format: <MajorVersion>.<MinorVersion>.<Patch>
Example: '1.0.0' or '1.1.0' The 3 version number to
follow standard of most of the RPs. See http://semver.org
### Response:
def replicate_vm_image(self, vm_image_name, regions, offer, sku, version):
'''
Replicate a VM image to multiple target locations. This operation
is only for publishers. You have to be registered as image publisher
with Microsoft Azure to be able to call this.
vm_image_name:
Specifies the name of the VM Image that is to be used for
replication
regions:
Specified a list of regions to replicate the image to
Note: The regions in the request body are not additive. If a VM
Image has already been replicated to Regions A, B, and C, and
a request is made to replicate to Regions A and D, the VM
Image will remain in Region A, will be replicated in Region D,
and will be unreplicated from Regions B and C
offer:
Specifies the publisher defined name of the offer. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.).The maximum allowed length is 64 characters.
sku:
Specifies the publisher defined name of the Sku. The allowed
characters are uppercase or lowercase letters, digit,
hypen(-), period (.). The maximum allowed length is 64 characters.
version:
Specifies the publisher defined version of the image.
The allowed characters are digit and period.
Format: <MajorVersion>.<MinorVersion>.<Patch>
Example: '1.0.0' or '1.1.0' The 3 version number to
follow standard of most of the RPs. See http://semver.org
'''
_validate_not_none('vm_image_name', vm_image_name)
_validate_not_none('regions', regions)
_validate_not_none('offer', offer)
_validate_not_none('sku', sku)
_validate_not_none('version', version)
return self._perform_put(
self._get_replication_path_using_vm_image_name(vm_image_name),
_XmlSerializer.replicate_image_to_xml(
regions,
offer,
sku,
version
),
as_async=True,
x_ms_version='2015-04-01'
) |
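Invocation is a single call on the service client the method belongs to; sms below stands for an already-authenticated service management client, and every value is hypothetical.

request_id = sms.replicate_vm_image(
    vm_image_name="my-vm-image",
    regions=["West US", "East US"],
    offer="my-offer",
    sku="standard",
    version="1.0.0",
)
# The operation is asynchronous on the service side; request_id identifies it
# for later status polling.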