code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) | text (stringlengths 164 to 112k) |
---|---|---|
def _call_marginalizevlos(self,o,**kwargs):
"""Call the DF, marginalizing over line-of-sight velocity"""
#Get d, l, vperp
l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD
vperp= o.vll(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])
R= o.R(use_physical=False)
phi= o.phi(use_physical=False)
#Get local circular velocity, projected onto the perpendicular
#direction
vcirc= R**self._beta
vcircperp= vcirc*math.cos(phi+l)
#Marginalize
alphaperp= math.pi/2.+phi+l
if not 'nsigma' in kwargs or ('nsigma' in kwargs and \
kwargs['nsigma'] is None):
nsigma= _NSIGMA
else:
nsigma= kwargs['nsigma']
kwargs.pop('nsigma',None)
sigmaR2= self.targetSigma2(R,use_physical=False)
sigmaR1= sc.sqrt(sigmaR2)
#Use the asymmetric drift equation to estimate va
va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1.
-R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True)
-R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True))
if math.fabs(va) > sigmaR1: va = 0. #To avoid craziness near the center
if math.fabs(math.sin(alphaperp)) < math.sqrt(1./2.):
cosalphaperp= math.cos(alphaperp)
tanalphaperp= math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,
-self._gamma*va/sigmaR1-nsigma,
-self._gamma*va/sigmaR1+nsigma,
args=(self,R,cosalphaperp,tanalphaperp,
vperp-vcircperp,vcirc,
sigmaR1/self._gamma),
**kwargs)[0]/math.fabs(cosalphaperp)\
*sigmaR1/self._gamma
else:
sinalphaperp= math.sin(alphaperp)
cotalphaperp= 1./math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,
-nsigma,nsigma,
args=(self,R,sinalphaperp,cotalphaperp,
vperp-vcircperp,vcirc,sigmaR1),
**kwargs)[0]/math.fabs(sinalphaperp)*sigmaR1 | Call the DF, marginalizing over line-of-sight velocity | Below is the instruction that describes the task:
### Input:
Call the DF, marginalizing over line-of-sight velocity
### Response:
def _call_marginalizevlos(self,o,**kwargs):
"""Call the DF, marginalizing over line-of-sight velocity"""
#Get d, l, vperp
l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD
vperp= o.vll(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])
R= o.R(use_physical=False)
phi= o.phi(use_physical=False)
#Get local circular velocity, projected onto the perpendicular
#direction
vcirc= R**self._beta
vcircperp= vcirc*math.cos(phi+l)
#Marginalize
alphaperp= math.pi/2.+phi+l
if not 'nsigma' in kwargs or ('nsigma' in kwargs and \
kwargs['nsigma'] is None):
nsigma= _NSIGMA
else:
nsigma= kwargs['nsigma']
kwargs.pop('nsigma',None)
sigmaR2= self.targetSigma2(R,use_physical=False)
sigmaR1= sc.sqrt(sigmaR2)
#Use the asymmetric drift equation to estimate va
va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1.
-R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True)
-R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True))
if math.fabs(va) > sigmaR1: va = 0. #To avoid craziness near the center
if math.fabs(math.sin(alphaperp)) < math.sqrt(1./2.):
cosalphaperp= math.cos(alphaperp)
tanalphaperp= math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,
-self._gamma*va/sigmaR1-nsigma,
-self._gamma*va/sigmaR1+nsigma,
args=(self,R,cosalphaperp,tanalphaperp,
vperp-vcircperp,vcirc,
sigmaR1/self._gamma),
**kwargs)[0]/math.fabs(cosalphaperp)\
*sigmaR1/self._gamma
else:
sinalphaperp= math.sin(alphaperp)
cotalphaperp= 1./math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,
-nsigma,nsigma,
args=(self,R,sinalphaperp,cotalphaperp,
vperp-vcircperp,vcirc,sigmaR1),
**kwargs)[0]/math.fabs(sinalphaperp)*sigmaR1 |
def _sections_to_variance_sections(self, sections_over_time):
'''Computes the variance of corresponding sections over time.
Returns:
a list of np arrays.
'''
variance_sections = []
for i in range(len(sections_over_time[0])):
time_sections = [sections[i] for sections in sections_over_time]
variance = np.var(time_sections, axis=0)
variance_sections.append(variance)
return variance_sections | Computes the variance of corresponding sections over time.
Returns:
a list of np arrays. | Below is the instruction that describes the task:
### Input:
Computes the variance of corresponding sections over time.
Returns:
a list of np arrays.
### Response:
def _sections_to_variance_sections(self, sections_over_time):
'''Computes the variance of corresponding sections over time.
Returns:
a list of np arrays.
'''
variance_sections = []
for i in range(len(sections_over_time[0])):
time_sections = [sections[i] for sections in sections_over_time]
variance = np.var(time_sections, axis=0)
variance_sections.append(variance)
return variance_sections |
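To make the variance-over-time computation above concrete, here is a minimal self-contained sketch with hypothetical snapshot data (the arrays are made up, not from the original source):

```python
import numpy as np

# Two hypothetical snapshots, each holding the same two sections.
sections_over_time = [
    [np.array([1.0, 2.0]), np.array([10.0, 20.0])],  # snapshot at t0
    [np.array([3.0, 4.0]), np.array([10.0, 22.0])],  # snapshot at t1
]
# Variance of each section across time, mirroring the loop above.
variance_sections = [
    np.var([sections[i] for sections in sections_over_time], axis=0)
    for i in range(len(sections_over_time[0]))
]
print(variance_sections)  # [array([1., 1.]), array([0., 1.])]
```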
def get_system_path():
"""Return the path that Windows will search for dlls."""
_bpath = []
if is_win:
try:
import win32api
except ImportError:
logger.warn("Cannot determine your Windows or System directories")
logger.warn("Please add them to your PATH if .dlls are not found")
logger.warn("or install http://sourceforge.net/projects/pywin32/")
else:
sysdir = win32api.GetSystemDirectory()
sysdir2 = os.path.normpath(os.path.join(sysdir, '..', 'SYSTEM'))
windir = win32api.GetWindowsDirectory()
_bpath = [sysdir, sysdir2, windir]
_bpath.extend(compat.getenv('PATH', '').split(os.pathsep))
return _bpath | Return the path that Windows will search for dlls. | Below is the instruction that describes the task:
### Input:
Return the path that Windows will search for dlls.
### Response:
def get_system_path():
"""Return the path that Windows will search for dlls."""
_bpath = []
if is_win:
try:
import win32api
except ImportError:
logger.warn("Cannot determine your Windows or System directories")
logger.warn("Please add them to your PATH if .dlls are not found")
logger.warn("or install http://sourceforge.net/projects/pywin32/")
else:
sysdir = win32api.GetSystemDirectory()
sysdir2 = os.path.normpath(os.path.join(sysdir, '..', 'SYSTEM'))
windir = win32api.GetWindowsDirectory()
_bpath = [sysdir, sysdir2, windir]
_bpath.extend(compat.getenv('PATH', '').split(os.pathsep))
return _bpath |
def find_stack_elements(self, module, module_name="", _visited_modules=None):
"""
This function goes through the given container and returns the stack elements. Each stack
element is represented by a tuple:
( container_name, element_name, stack_element)
The tuples are returned in an array
"""
from types import ModuleType
if _visited_modules is None: _visited_modules = []
_visited_modules.append(module)
#
elements = []
for el_name in dir(module):
the_el = module.__getattribute__(el_name)
if isinstance(the_el, ModuleType):
# Recursively go into the module
if the_el in _visited_modules:
continue
elements = elements + self.find_stack_elements(the_el, module_name + el_name + ".", _visited_modules)
elif isinstance(the_el, StackElement):
# Add to list
elements.append((module_name, el_name, the_el))
return elements | This function goes through the given container and returns the stack elements. Each stack
element is represented by a tuple:
( container_name, element_name, stack_element)
The tuples are returned in an array | Below is the instruction that describes the task:
### Input:
This function goes through the given container and returns the stack elements. Each stack
element is represented by a tuple:
( container_name, element_name, stack_element)
The tuples are returned in an array
### Response:
def find_stack_elements(self, module, module_name="", _visited_modules=None):
"""
This function goes through the given container and returns the stack elements. Each stack
element is represented by a tuple:
( container_name, element_name, stack_element)
The tuples are returned in an array
"""
from types import ModuleType
if _visited_modules is None: _visited_modules = []
_visited_modules.append(module)
#
elements = []
for el_name in dir(module):
the_el = module.__getattribute__(el_name)
if isinstance(the_el, ModuleType):
# Recursively go into the module
if the_el in _visited_modules:
continue
elements = elements + self.find_stack_elements(the_el, module_name + el_name + ".", _visited_modules)
elif isinstance(the_el, StackElement):
# Add to list
elements.append((module_name, el_name, the_el))
return elements |
def site_name(self, site_name):
"""Function that sets and checks the site name and set url.
Parameters:
site_name (str): The site name in 'SITE_LIST', default sites.
Raises:
PybooruError: When 'site_name' isn't valid.
"""
if site_name in SITE_LIST:
self.__site_name = site_name
self.__site_url = SITE_LIST[site_name]['url']
else:
raise PybooruError(
"The 'site_name' is not valid, specify a valid 'site_name'.") | Function that sets and checks the site name and set url.
Parameters:
site_name (str): The site name in 'SITE_LIST', default sites.
Raises:
PybooruError: When 'site_name' isn't valid. | Below is the instruction that describes the task:
### Input:
Function that sets and checks the site name and set url.
Parameters:
site_name (str): The site name in 'SITE_LIST', default sites.
Raises:
PybooruError: When 'site_name' isn't valid.
### Response:
def site_name(self, site_name):
"""Function that sets and checks the site name and set url.
Parameters:
site_name (str): The site name in 'SITE_LIST', default sites.
Raises:
PybooruError: When 'site_name' isn't valid.
"""
if site_name in SITE_LIST:
self.__site_name = site_name
self.__site_url = SITE_LIST[site_name]['url']
else:
raise PybooruError(
"The 'site_name' is not valid, specify a valid 'site_name'.") |
def get_corpus(self):
"""获取语料库
Return:
corpus -- 语料库,str类型
"""
# 正向判定
corpus = []
cd = 0
tag = None
for i in range(0, self.init_corpus[0][0]):
init_unit = self.unit_raw[self.init_corpus[0][0] - i]
cdm = CDM(init_unit)
alpha = cdm.get_alpha()
if cd <= self.cd_min and cdm.NC is not 0:
tag = True
if cd > self.cd_max or cdm.NC == 0:
tag = False
if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC is not 0:
if alpha > 0:
tag = True
else:
tag = False
if cdm.NC == 0:
cd += 1
else:
cd = 0
if tag == True:
corpus.append(init_unit)
elif tag == False:
if alpha < 0 or cd > self.cd_max:
break
else:
continue
corpus = list(reversed(corpus))
try:
self.index = self.init_corpus[0][0] - i + 1
except UnboundLocalError:
log('err', '正向判定完成,索引定位出错')
self.index = self.init_corpus[0][0]
# 反向判定
cd = 0
tag = None
for i in range(1, len(self.unit_raw) - self.init_corpus[0][0]):
init_unit = self.unit_raw[self.init_corpus[0][0] + i]
cdm = CDM(init_unit)
alpha = cdm.get_alpha()
if cd <= self.cd_min and cdm.NC is not 0:
tag = True
if cd > self.cd_max or cdm.NC == 0:
tag = False
if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC is not 0:
if alpha > 0:
tag = True
else:
tag = False
if cdm.NC == 0:
cd += 1
else:
cd = 0
if tag == True:
corpus.append(init_unit)
elif tag == False:
if alpha < 0 or cd > self.cd_max:
break
else:
continue
log('debug', '\n获取语料库成功:【{}】\n'.format(corpus))
return ''.join(corpus) | 获取语料库
Return:
corpus -- 语料库,str类型 | Below is the instruction that describes the task:
### Input:
获取语料库
Return:
corpus -- 语料库,str类型
### Response:
def get_corpus(self):
"""获取语料库
Return:
corpus -- 语料库,str类型
"""
# 正向判定
corpus = []
cd = 0
tag = None
for i in range(0, self.init_corpus[0][0]):
init_unit = self.unit_raw[self.init_corpus[0][0] - i]
cdm = CDM(init_unit)
alpha = cdm.get_alpha()
if cd <= self.cd_min and cdm.NC is not 0:
tag = True
if cd > self.cd_max or cdm.NC == 0:
tag = False
if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC is not 0:
if alpha > 0:
tag = True
else:
tag = False
if cdm.NC == 0:
cd += 1
else:
cd = 0
if tag == True:
corpus.append(init_unit)
elif tag == False:
if alpha < 0 or cd > self.cd_max:
break
else:
continue
corpus = list(reversed(corpus))
try:
self.index = self.init_corpus[0][0] - i + 1
except UnboundLocalError:
log('err', '正向判定完成,索引定位出错')
self.index = self.init_corpus[0][0]
# 反向判定
cd = 0
tag = None
for i in range(1, len(self.unit_raw) - self.init_corpus[0][0]):
init_unit = self.unit_raw[self.init_corpus[0][0] + i]
cdm = CDM(init_unit)
alpha = cdm.get_alpha()
if cd <= self.cd_min and cdm.NC is not 0:
tag = True
if cd > self.cd_max or cdm.NC == 0:
tag = False
if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC is not 0:
if alpha > 0:
tag = True
else:
tag = False
if cdm.NC == 0:
cd += 1
else:
cd = 0
if tag == True:
corpus.append(init_unit)
elif tag == False:
if alpha < 0 or cd > self.cd_max:
break
else:
continue
log('debug', '\n获取语料库成功:【{}】\n'.format(corpus))
return ''.join(corpus) |
def list_menu(self, options, title="Choose a value", message="Choose a value", default=None, **kwargs):
"""
Show a single-selection list menu
Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param default: default value to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, str)}
"""
choices = []
for option in options:
if option == default:
choices.append("TRUE")
else:
choices.append("FALSE")
choices.append(option)
return self._run_zenity(
title,
["--list", "--radiolist", "--text", message, "--column", " ", "--column", "Options"] + choices,
kwargs) | Show a single-selection list menu
Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param default: default value to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, str)} | Below is the instruction that describes the task:
### Input:
Show a single-selection list menu
Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param default: default value to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, str)}
### Response:
def list_menu(self, options, title="Choose a value", message="Choose a value", default=None, **kwargs):
"""
Show a single-selection list menu
Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param default: default value to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, str)}
"""
choices = []
for option in options:
if option == default:
choices.append("TRUE")
else:
choices.append("FALSE")
choices.append(option)
return self._run_zenity(
title,
["--list", "--radiolist", "--text", message, "--column", " ", "--column", "Options"] + choices,
kwargs) |
def parse_extends(self):
"""
For each part, create the inheritance parts from the ' extends '
"""
# To be able to manage multiple extends, you need to
# destroy the actual node and create many nodes that have
# mono extend. The first one gets all the css rules
for _selectors, rules in self.parts.items():
if ' extends ' in _selectors:
selectors, _, parent = _selectors.partition(' extends ')
parents = parent.split('&')
del self.parts[_selectors]
for parent in parents:
new_selectors = selectors + ' extends ' + parent
self.parts.setdefault(new_selectors, [])
self.parts[new_selectors].extend(rules)
rules = []
# further rules extending other parents will be empty
cnt = 0
parents_left = True
while parents_left and cnt < 10:
cnt += 1
parents_left = False
for _selectors in self.parts.keys():
selectors, _, parent = _selectors.partition(' extends ')
if parent:
parents_left = True
if _selectors not in self.parts:
continue # Nodes might have been renamed while linking parents...
rules = self.parts[_selectors]
del self.parts[_selectors]
self.parts.setdefault(selectors, [])
self.parts[selectors].extend(rules)
parents = self.link_with_parents(parent, selectors, rules)
if parents is None:
log.warn("Parent rule not found: %s", parent)
else:
# from the parent, inherit the context and the options:
new_context = {}
new_options = {}
for parent in parents:
new_context.update(parent[CONTEXT])
new_options.update(parent[OPTIONS])
for rule in rules:
_new_context = new_context.copy()
_new_context.update(rule[CONTEXT])
rule[CONTEXT] = _new_context
_new_options = new_options.copy()
_new_options.update(rule[OPTIONS])
rule[OPTIONS] = _new_options | For each part, create the inheritance parts from the ' extends ' | Below is the instruction that describes the task:
### Input:
For each part, create the inheritance parts from the ' extends '
### Response:
def parse_extends(self):
"""
For each part, create the inheritance parts from the ' extends '
"""
# To be able to manage multiple extends, you need to
# destroy the actual node and create many nodes that have
# mono extend. The first one gets all the css rules
for _selectors, rules in self.parts.items():
if ' extends ' in _selectors:
selectors, _, parent = _selectors.partition(' extends ')
parents = parent.split('&')
del self.parts[_selectors]
for parent in parents:
new_selectors = selectors + ' extends ' + parent
self.parts.setdefault(new_selectors, [])
self.parts[new_selectors].extend(rules)
rules = []
# further rules extending other parents will be empty
cnt = 0
parents_left = True
while parents_left and cnt < 10:
cnt += 1
parents_left = False
for _selectors in self.parts.keys():
selectors, _, parent = _selectors.partition(' extends ')
if parent:
parents_left = True
if _selectors not in self.parts:
continue # Nodes might have been renamed while linking parents...
rules = self.parts[_selectors]
del self.parts[_selectors]
self.parts.setdefault(selectors, [])
self.parts[selectors].extend(rules)
parents = self.link_with_parents(parent, selectors, rules)
if parents is None:
log.warn("Parent rule not found: %s", parent)
else:
# from the parent, inherit the context and the options:
new_context = {}
new_options = {}
for parent in parents:
new_context.update(parent[CONTEXT])
new_options.update(parent[OPTIONS])
for rule in rules:
_new_context = new_context.copy()
_new_context.update(rule[CONTEXT])
rule[CONTEXT] = _new_context
_new_options = new_options.copy()
_new_options.update(rule[OPTIONS])
rule[OPTIONS] = _new_options |
def add_sparql_line_nums(sparql):
"""
Returns a sparql query with line numbers prepended
"""
lines = sparql.split("\n")
return "\n".join(["%s %s" % (i + 1, line) for i, line in enumerate(lines)]) | Returns a sparql query with line numbers prepended | Below is the the instruction that describes the task:
### Input:
Returns a sparql query with line numbers prepended
### Response:
def add_sparql_line_nums(sparql):
"""
Returns a sparql query with line numbers prepended
"""
lines = sparql.split("\n")
return "\n".join(["%s %s" % (i + 1, line) for i, line in enumerate(lines)]) |
def _get_raw_data(self, name):
"""Find file holding data and return its content."""
# try legacy first, then hdf5
filestem = ''
for filestem, list_fvar in self._files.items():
if name in list_fvar:
break
fieldfile = self.step.sdat.filename(filestem, self.step.isnap,
force_legacy=True)
if not fieldfile.is_file():
fieldfile = self.step.sdat.filename(filestem, self.step.isnap)
parsed_data = None
if fieldfile.is_file():
parsed_data = stagyyparsers.fields(fieldfile)
elif self.step.sdat.hdf5 and self._filesh5:
for filestem, list_fvar in self._filesh5.items():
if name in list_fvar:
break
parsed_data = stagyyparsers.read_field_h5(
self.step.sdat.hdf5 / 'Data.xmf', filestem, self.step.isnap)
return list_fvar, parsed_data | Find file holding data and return its content. | Below is the instruction that describes the task:
### Input:
Find file holding data and return its content.
### Response:
def _get_raw_data(self, name):
"""Find file holding data and return its content."""
# try legacy first, then hdf5
filestem = ''
for filestem, list_fvar in self._files.items():
if name in list_fvar:
break
fieldfile = self.step.sdat.filename(filestem, self.step.isnap,
force_legacy=True)
if not fieldfile.is_file():
fieldfile = self.step.sdat.filename(filestem, self.step.isnap)
parsed_data = None
if fieldfile.is_file():
parsed_data = stagyyparsers.fields(fieldfile)
elif self.step.sdat.hdf5 and self._filesh5:
for filestem, list_fvar in self._filesh5.items():
if name in list_fvar:
break
parsed_data = stagyyparsers.read_field_h5(
self.step.sdat.hdf5 / 'Data.xmf', filestem, self.step.isnap)
return list_fvar, parsed_data |
def dateparser(self, dformat='%d/%m/%Y'):
"""
Returns a date parser for pandas
"""
def dateparse(dates):
return [pd.datetime.strptime(d, dformat) for d in dates]
return dateparse | Returns a date parser for pandas | Below is the instruction that describes the task:
### Input:
Returns a date parser for pandas
### Response:
def dateparser(self, dformat='%d/%m/%Y'):
"""
Returns a date parser for pandas
"""
def dateparse(dates):
return [pd.datetime.strptime(d, dformat) for d in dates]
return dateparse |
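A hedged usage sketch for the factory above: the returned callable is meant for `pandas.read_csv`'s `date_parser` argument. The file and column names here are made up, and note that `pd.datetime` was removed in pandas 2.0, so on recent pandas the inner call would need `datetime.datetime.strptime` instead:

```python
import pandas as pd

# obj is assumed to be an instance of the class that defines dateparser above.
parse_dmy = obj.dateparser('%d/%m/%Y')
df = pd.read_csv('prices.csv', parse_dates=['date'], date_parser=parse_dmy)
```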
def parse_token(response):
"""
parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens
"""
items = response.split("&")
items = [item.split("=") for item in items]
return {key: value for key, value in items} | parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens | Below is the instruction that describes the task:
### Input:
parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens
### Response:
def parse_token(response):
"""
parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens
"""
items = response.split("&")
items = [item.split("=") for item in items]
return {key: value for key, value in items} |
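A minimal usage sketch for `parse_token`, with a made-up form-encoded token response:

```python
response = "oauth_token=abc123&oauth_token_secret=s3cr3t"
print(parse_token(response))
# {'oauth_token': 'abc123', 'oauth_token_secret': 's3cr3t'}
```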
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout() | Rebase all branches, if possible. | Below is the instruction that describes the task:
### Input:
Rebase all branches, if possible.
### Response:
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout() |
def register_observer(self, observer, events=None):
"""Register a listener function.
:param observer: external listener function
:param events: tuple or list of relevant events (default=None)
"""
if events is not None and not isinstance(events, (tuple, list)):
events = (events,)
if observer in self._observers:
LOG.warning("Observer '%r' already registered, overwriting for events"
" %r", observer, events)
self._observers[observer] = events | Register a listener function.
:param observer: external listener function
:param events: tuple or list of relevant events (default=None) | Below is the instruction that describes the task:
### Input:
Register a listener function.
:param observer: external listener function
:param events: tuple or list of relevant events (default=None)
### Response:
def register_observer(self, observer, events=None):
"""Register a listener function.
:param observer: external listener function
:param events: tuple or list of relevant events (default=None)
"""
if events is not None and not isinstance(events, (tuple, list)):
events = (events,)
if observer in self._observers:
LOG.warning("Observer '%r' already registered, overwriting for events"
" %r", observer, events)
self._observers[observer] = events |
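A brief usage sketch, where `bus` stands for an instance of the class defining `register_observer` and `on_change` is an arbitrary caller-supplied callable (both names, and the callable's signature, are hypothetical):

```python
def on_change(event, payload):
    print("observed", event, payload)

bus.register_observer(on_change, events=("created", "updated"))
bus.register_observer(on_change)  # events=None means "all events"
```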
def ladderize(self, direction=0):
"""
Ladderize tree (order descendants) so that top child has fewer
descendants than the bottom child in a left to right tree plot.
To reverse this pattern use direction=1.
"""
nself = deepcopy(self)
nself.treenode.ladderize(direction=direction)
nself._fixed_order = None
nself._coords.update()
return nself | Ladderize tree (order descendants) so that top child has fewer
descendants than the bottom child in a left to right tree plot.
To reverse this pattern use direction=1. | Below is the instruction that describes the task:
### Input:
Ladderize tree (order descendants) so that top child has fewer
descendants than the bottom child in a left to right tree plot.
To reverse this pattern use direction=1.
### Response:
def ladderize(self, direction=0):
"""
Ladderize tree (order descendants) so that top child has fewer
descendants than the bottom child in a left to right tree plot.
To reverse this pattern use direction=1.
"""
nself = deepcopy(self)
nself.treenode.ladderize(direction=direction)
nself._fixed_order = None
nself._coords.update()
return nself |
def OSPFNeighborState_originator_switch_info_switchIpV4Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(OSPFNeighborState, "originator-switch-info")
switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address")
switchIpV4Address.text = kwargs.pop('switchIpV4Address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def OSPFNeighborState_originator_switch_info_switchIpV4Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(OSPFNeighborState, "originator-switch-info")
switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address")
switchIpV4Address.text = kwargs.pop('switchIpV4Address')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def oauth_manager(self, oauth_manager):
"""Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager
"""
@self.app.before_request
def before_request():
endpoint = request.endpoint
resource = self.app.view_functions[endpoint].view_class
if not getattr(resource, 'disable_oauth'):
scopes = request.args.get('scopes')
if getattr(resource, 'schema'):
scopes = [self.build_scope(resource, request.method)]
elif scopes:
scopes = scopes.split(',')
if scopes:
scopes = scopes.split(',')
valid, req = oauth_manager.verify_request(scopes)
for func in oauth_manager._after_request_funcs:
valid, req = func(valid, req)
if not valid:
if oauth_manager._invalid_response:
return oauth_manager._invalid_response(req)
return abort(401)
request.oauth = req | Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager | Below is the instruction that describes the task:
### Input:
Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager
### Response:
def oauth_manager(self, oauth_manager):
"""Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager
"""
@self.app.before_request
def before_request():
endpoint = request.endpoint
resource = self.app.view_functions[endpoint].view_class
if not getattr(resource, 'disable_oauth'):
scopes = request.args.get('scopes')
if getattr(resource, 'schema'):
scopes = [self.build_scope(resource, request.method)]
elif scopes:
scopes = scopes.split(',')
if scopes:
scopes = scopes.split(',')
valid, req = oauth_manager.verify_request(scopes)
for func in oauth_manager._after_request_funcs:
valid, req = func(valid, req)
if not valid:
if oauth_manager._invalid_response:
return oauth_manager._invalid_response(req)
return abort(401)
request.oauth = req |
def heightmap_clamp(hm: np.ndarray, mi: float, ma: float) -> None:
"""Clamp all values on this heightmap between ``mi`` and ``ma``
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound to clamp to.
ma (float): The upper bound to clamp to.
.. deprecated:: 2.0
Do ``hm.clip(mi, ma)`` instead.
"""
hm.clip(mi, ma) | Clamp all values on this heightmap between ``mi`` and ``ma``
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound to clamp to.
ma (float): The upper bound to clamp to.
.. deprecated:: 2.0
Do ``hm.clip(mi, ma)`` instead. | Below is the instruction that describes the task:
### Input:
Clamp all values on this heightmap between ``mi`` and ``ma``
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound to clamp to.
ma (float): The upper bound to clamp to.
.. deprecated:: 2.0
Do ``hm.clip(mi, ma)`` instead.
### Response:
def heightmap_clamp(hm: np.ndarray, mi: float, ma: float) -> None:
"""Clamp all values on this heightmap between ``mi`` and ``ma``
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound to clamp to.
ma (float): The upper bound to clamp to.
.. deprecated:: 2.0
Do ``hm.clip(mi, ma)`` instead.
"""
hm.clip(mi, ma) |
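One caveat worth noting for `heightmap_clamp`: `numpy.ndarray.clip` returns a new array and does not modify its input unless `out=` is given, so the call above discards its result. A sketch of an in-place equivalent (not the library's own code):

```python
import numpy as np

hm = np.random.random((8, 8)).astype(np.float32)
hm.clip(0.2, 0.8, out=hm)  # clamp in place between 0.2 and 0.8
```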
async def _unsubscribe(self, channels, is_mask):
"""Unsubscribe from given channel."""
vanished = []
if channels:
for channel in channels:
key = channel, is_mask
self._channels.remove(key)
self._plugin._subscriptions[key].remove(self._queue)
if not self._plugin._subscriptions[key]: # we were last sub?
vanished.append(channel)
del self._plugin._subscriptions[key]
else:
while self._channels:
channel, is_mask = key = self._channels.pop()
self._plugin._subscriptions[key].remove(self._queue)
if not self._plugin._subscriptions[key]:
vanished.append(channel)
del self._plugin._subscriptions[key]
if vanished:
await getattr(self._sub, 'punsubscribe' if is_mask else 'unsubscribe')(vanished) | Unsubscribe from given channel. | Below is the instruction that describes the task:
### Input:
Unsubscribe from given channel.
### Response:
async def _unsubscribe(self, channels, is_mask):
"""Unsubscribe from given channel."""
vanished = []
if channels:
for channel in channels:
key = channel, is_mask
self._channels.remove(key)
self._plugin._subscriptions[key].remove(self._queue)
if not self._plugin._subscriptions[key]: # we were last sub?
vanished.append(channel)
del self._plugin._subscriptions[key]
else:
while self._channels:
channel, is_mask = key = self._channels.pop()
self._plugin._subscriptions[key].remove(self._queue)
if not self._plugin._subscriptions[key]:
vanished.append(channel)
del self._plugin._subscriptions[key]
if vanished:
await getattr(self._sub, 'punsubscribe' if is_mask else 'unsubscribe')(vanished) |
def _query(profile,
action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None,
url='https://api.github.com/',
per_page=None):
'''
Make a web call to the GitHub API and deal with paginated results.
'''
if not isinstance(args, dict):
args = {}
if action:
url += action
if command:
url += '/{0}'.format(command)
log.debug('GitHub URL: %s', url)
if 'access_token' not in args.keys():
args['access_token'] = _get_config_value(profile, 'token')
if per_page and 'per_page' not in args.keys():
args['per_page'] = per_page
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
# GitHub paginates all queries when returning many items.
# Gather all data using multiple queries and handle pagination.
complete_result = []
next_page = True
page_number = ''
while next_page is True:
if page_number:
args['page'] = page_number
result = salt.utils.http.query(url,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
headers=True,
status=True,
text=True,
hide_fields=['access_token'],
opts=__opts__,
)
log.debug('GitHub Response Status Code: %s',
result['status'])
if result['status'] == 200:
if isinstance(result['dict'], dict):
# If only querying for one item, such as a single issue
# The GitHub API returns a single dictionary, instead of
# A list of dictionaries. In that case, we can return.
return result['dict']
complete_result = complete_result + result['dict']
else:
raise CommandExecutionError(
'GitHub Response Error: {0}'.format(result.get('error'))
)
try:
link_info = result.get('headers').get('Link').split(',')[0]
except AttributeError:
# Only one page of data was returned; exit the loop.
next_page = False
continue
if 'next' in link_info:
# Get the 'next' page number from the Link header.
page_number = link_info.split('>')[0].split('&page=')[1]
else:
# Last page already processed; break the loop.
next_page = False
return complete_result | Make a web call to the GitHub API and deal with paginated results. | Below is the instruction that describes the task:
### Input:
Make a web call to the GitHub API and deal with paginated results.
### Response:
def _query(profile,
action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None,
url='https://api.github.com/',
per_page=None):
'''
Make a web call to the GitHub API and deal with paginated results.
'''
if not isinstance(args, dict):
args = {}
if action:
url += action
if command:
url += '/{0}'.format(command)
log.debug('GitHub URL: %s', url)
if 'access_token' not in args.keys():
args['access_token'] = _get_config_value(profile, 'token')
if per_page and 'per_page' not in args.keys():
args['per_page'] = per_page
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
# GitHub paginates all queries when returning many items.
# Gather all data using multiple queries and handle pagination.
complete_result = []
next_page = True
page_number = ''
while next_page is True:
if page_number:
args['page'] = page_number
result = salt.utils.http.query(url,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
headers=True,
status=True,
text=True,
hide_fields=['access_token'],
opts=__opts__,
)
log.debug('GitHub Response Status Code: %s',
result['status'])
if result['status'] == 200:
if isinstance(result['dict'], dict):
# If only querying for one item, such as a single issue
# The GitHub API returns a single dictionary, instead of
# A list of dictionaries. In that case, we can return.
return result['dict']
complete_result = complete_result + result['dict']
else:
raise CommandExecutionError(
'GitHub Response Error: {0}'.format(result.get('error'))
)
try:
link_info = result.get('headers').get('Link').split(',')[0]
except AttributeError:
# Only one page of data was returned; exit the loop.
next_page = False
continue
if 'next' in link_info:
# Get the 'next' page number from the Link header.
page_number = link_info.split('>')[0].split('&page=')[1]
else:
# Last page already processed; break the loop.
next_page = False
return complete_result |
def from_spcm(filepath, name=None, *, delimiter=",", parent=None, verbose=True) -> Data:
"""Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software hompage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object
"""
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
# check filepath
if not ".asc" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create headers dictionary
headers = collections.OrderedDict()
header_lines = 0
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
while True:
line = f.readline().strip()
header_lines += 1
if len(line) == 0:
break
else:
key, value = line.split(":", 1)
if key.strip() == "Revision":
headers["resolution"] = int(value.strip(" bits ADC"))
else:
headers[key.strip()] = value.strip()
line = f.readline().strip()
while "_BEGIN" in line:
header_lines += 1
section = line.split("_BEGIN")[0]
while True:
line = f.readline().strip()
header_lines += 1
if section + "_END" in line:
break
if section == "SYS_PARA":
use_type = {
"B": lambda b: int(b) == 1,
"C": str, # e.g. #SP [SP_OVERFL,C,N]
"F": float,
"I": int,
"L": int, # e.g. #DI [DI_MAXCNT,L,128]
"S": str,
"U": int, # unsigned int?
}
item = line[line.find("[") + 1 : line.find("]")].split(",")
key = item[0]
value = use_type[item[1]](item[2])
headers[key] = value
else:
splitted = line.split()
value = splitted[-1][1:-1].split(",")
key = " ".join(splitted[:-1])
headers[key] = value
line = f.readline().strip()
if "END" in line:
header_lines += 1
break
if "Date" in headers.keys() and "Time" in headers.keys():
# NOTE: reports created in local time, no-way to calculate absolute time
created = " ".join([headers["Date"], headers["Time"]])
created = time.strptime(created, "%Y-%m-%d %H:%M:%S")
created = timestamp.TimeStamp(time.mktime(created)).RFC3339
headers["created"] = created
# initialize data object
kwargs = {"name": name, "kind": "spcm", "source": filestr, **headers}
if parent:
data = parent.create_data(**kwargs)
else:
data = Data(**kwargs)
# import data
f.seek(0)
arr = np.genfromtxt(
f, skip_header=(header_lines + 1), skip_footer=1, delimiter=delimiter, unpack=True
)
f.close()
# construct data
data.create_variable(name="time", values=arr[0], units="ns")
data.create_channel(name="counts", values=arr[1])
data.transform("time")
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" kind: {0}".format(data.kind))
print(" range: {0} to {1} (ns)".format(data.time[0], data.time[-1]))
print(" size: {0}".format(data.size))
if "SP_COL_T" in data.attrs.keys():
print(" collection time: {0} sec".format(data.attrs["SP_COL_T"]))
return data | Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software hompage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object | Below is the instruction that describes the task:
### Input:
Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software hompage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object
### Response:
def from_spcm(filepath, name=None, *, delimiter=",", parent=None, verbose=True) -> Data:
"""Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software hompage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object
"""
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
# check filepath
if not ".asc" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create headers dictionary
headers = collections.OrderedDict()
header_lines = 0
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
while True:
line = f.readline().strip()
header_lines += 1
if len(line) == 0:
break
else:
key, value = line.split(":", 1)
if key.strip() == "Revision":
headers["resolution"] = int(value.strip(" bits ADC"))
else:
headers[key.strip()] = value.strip()
line = f.readline().strip()
while "_BEGIN" in line:
header_lines += 1
section = line.split("_BEGIN")[0]
while True:
line = f.readline().strip()
header_lines += 1
if section + "_END" in line:
break
if section == "SYS_PARA":
use_type = {
"B": lambda b: int(b) == 1,
"C": str, # e.g. #SP [SP_OVERFL,C,N]
"F": float,
"I": int,
"L": int, # e.g. #DI [DI_MAXCNT,L,128]
"S": str,
"U": int, # unsigned int?
}
item = line[line.find("[") + 1 : line.find("]")].split(",")
key = item[0]
value = use_type[item[1]](item[2])
headers[key] = value
else:
splitted = line.split()
value = splitted[-1][1:-1].split(",")
key = " ".join(splitted[:-1])
headers[key] = value
line = f.readline().strip()
if "END" in line:
header_lines += 1
break
if "Date" in headers.keys() and "Time" in headers.keys():
# NOTE: reports created in local time, no-way to calculate absolute time
created = " ".join([headers["Date"], headers["Time"]])
created = time.strptime(created, "%Y-%m-%d %H:%M:%S")
created = timestamp.TimeStamp(time.mktime(created)).RFC3339
headers["created"] = created
# initialize data object
kwargs = {"name": name, "kind": "spcm", "source": filestr, **headers}
if parent:
data = parent.create_data(**kwargs)
else:
data = Data(**kwargs)
# import data
f.seek(0)
arr = np.genfromtxt(
f, skip_header=(header_lines + 1), skip_footer=1, delimiter=delimiter, unpack=True
)
f.close()
# construct data
data.create_variable(name="time", values=arr[0], units="ns")
data.create_channel(name="counts", values=arr[1])
data.transform("time")
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" kind: {0}".format(data.kind))
print(" range: {0} to {1} (ns)".format(data.time[0], data.time[-1]))
print(" size: {0}".format(data.size))
if "SP_COL_T" in data.attrs.keys():
print(" collection time: {0} sec".format(data.attrs["SP_COL_T"]))
return data |
def hdate(self):
"""Return the hebrew date."""
if self._last_updated == "hdate":
return self._hdate
return conv.jdn_to_hdate(self._jdn) | Return the hebrew date. | Below is the instruction that describes the task:
### Input:
Return the hebrew date.
### Response:
def hdate(self):
"""Return the hebrew date."""
if self._last_updated == "hdate":
return self._hdate
return conv.jdn_to_hdate(self._jdn) |
def basis(self, n):
"""
Chebyshev basis functions T_n.
"""
if n == 0:
return self(np.array([1.]))
vals = np.ones(n+1)
vals[1::2] = -1
return self(vals) | Chebyshev basis functions T_n. | Below is the instruction that describes the task:
### Input:
Chebyshev basis functions T_n.
### Response:
def basis(self, n):
"""
Chebyshev basis functions T_n.
"""
if n == 0:
return self(np.array([1.]))
vals = np.ones(n+1)
vals[1::2] = -1
return self(vals) |
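The alternating +1/-1 pattern built in `basis` is consistent with the values of T_n at the n+1 Chebyshev extreme points, since T_n(cos(k*pi/n)) = cos(k*pi) = (-1)^k. A small numpy sanity check of that identity, independent of the class above:

```python
import numpy as np

n = 4
x = np.cos(np.arange(n + 1) * np.pi / n)  # Chebyshev extreme points
t_n = np.cos(n * np.arccos(x))            # T_n evaluated at those points
print(np.round(t_n))                      # [ 1. -1.  1. -1.  1.]
```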
def snapshot(self):
"""Return a new library item which is a copy of this one with any dynamic behavior made static."""
display_item = self.__class__()
display_item.display_type = self.display_type
# metadata
display_item._set_persistent_property_value("title", self._get_persistent_property_value("title"))
display_item._set_persistent_property_value("caption", self._get_persistent_property_value("caption"))
display_item._set_persistent_property_value("description", self._get_persistent_property_value("description"))
display_item._set_persistent_property_value("session_id", self._get_persistent_property_value("session_id"))
display_item._set_persistent_property_value("calibration_style_id", self._get_persistent_property_value("calibration_style_id"))
display_item._set_persistent_property_value("display_properties", self._get_persistent_property_value("display_properties"))
display_item.created = self.created
for graphic in self.graphics:
display_item.add_graphic(copy.deepcopy(graphic))
for display_data_channel in self.display_data_channels:
display_item.append_display_data_channel(copy.deepcopy(display_data_channel))
# this goes after the display data channels so that the layers don't get adjusted
display_item._set_persistent_property_value("display_layers", self._get_persistent_property_value("display_layers"))
return display_item | Return a new library item which is a copy of this one with any dynamic behavior made static. | Below is the instruction that describes the task:
### Input:
Return a new library item which is a copy of this one with any dynamic behavior made static.
### Response:
def snapshot(self):
"""Return a new library item which is a copy of this one with any dynamic behavior made static."""
display_item = self.__class__()
display_item.display_type = self.display_type
# metadata
display_item._set_persistent_property_value("title", self._get_persistent_property_value("title"))
display_item._set_persistent_property_value("caption", self._get_persistent_property_value("caption"))
display_item._set_persistent_property_value("description", self._get_persistent_property_value("description"))
display_item._set_persistent_property_value("session_id", self._get_persistent_property_value("session_id"))
display_item._set_persistent_property_value("calibration_style_id", self._get_persistent_property_value("calibration_style_id"))
display_item._set_persistent_property_value("display_properties", self._get_persistent_property_value("display_properties"))
display_item.created = self.created
for graphic in self.graphics:
display_item.add_graphic(copy.deepcopy(graphic))
for display_data_channel in self.display_data_channels:
display_item.append_display_data_channel(copy.deepcopy(display_data_channel))
# this goes after the display data channels so that the layers don't get adjusted
display_item._set_persistent_property_value("display_layers", self._get_persistent_property_value("display_layers"))
return display_item |
def fuzz(self, obj):
"""
Perform the fuzzing
"""
buf = list(obj)
FuzzFactor = random.randrange(1, len(buf))
numwrites=random.randrange(math.ceil((float(len(buf)) / FuzzFactor)))+1
for j in range(numwrites):
self.random_action(buf)
return self.safe_unicode(buf) | Perform the fuzzing | Below is the instruction that describes the task:
### Input:
Perform the fuzzing
### Response:
def fuzz(self, obj):
"""
Perform the fuzzing
"""
buf = list(obj)
FuzzFactor = random.randrange(1, len(buf))
numwrites=random.randrange(math.ceil((float(len(buf)) / FuzzFactor)))+1
for j in range(numwrites):
self.random_action(buf)
return self.safe_unicode(buf) |
def position_at_end(self, block):
"""
Position at the end of the basic *block*.
"""
self._block = block
self._anchor = len(block.instructions) | Position at the end of the basic *block*. | Below is the instruction that describes the task:
### Input:
Position at the end of the basic *block*.
### Response:
def position_at_end(self, block):
"""
Position at the end of the basic *block*.
"""
self._block = block
self._anchor = len(block.instructions) |
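This method matches the signature and behavior of llvmlite's `ir.IRBuilder.position_at_end`; assuming that is indeed the API in question, a typical usage sketch looks like:

```python
from llvmlite import ir

module = ir.Module(name="example")
fnty = ir.FunctionType(ir.VoidType(), [])
func = ir.Function(module, fnty, name="noop")
block = func.append_basic_block(name="entry")

builder = ir.IRBuilder()
builder.position_at_end(block)  # subsequent instructions append to `block`
builder.ret_void()
```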
def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps=1, discount_factor=1.0):
""" Return batch of frames for given indexes """
if forward_steps > 1:
transition_arrays = self.backend.get_transitions_forward_steps(indexes, forward_steps, discount_factor)
else:
transition_arrays = self.backend.get_transitions(indexes)
priority_weight = self.priority_weight.value(batch_info['progress'])
# Normalize by sum of all probs
probs = probs / np.array([s.total() for s in self.backend.segment_trees], dtype=float).reshape(1, -1)
capacity = self.backend.current_size
weights = (capacity * probs) ** (-priority_weight)
weights = weights / weights.max(axis=0, keepdims=True)
transition_arrays['weights'] = weights
transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()}
transitions = Trajectories(
num_steps=indexes.shape[0],
num_envs=indexes.shape[1],
environment_information=None,
transition_tensors=transition_tensors,
rollout_tensors={},
extra_data={
'tree_idxs': tree_idxs
}
)
return transitions.to_transitions() | Return batch of frames for given indexes | Below is the instruction that describes the task:
### Input:
Return batch of frames for given indexes
### Response:
def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps=1, discount_factor=1.0):
""" Return batch of frames for given indexes """
if forward_steps > 1:
transition_arrays = self.backend.get_transitions_forward_steps(indexes, forward_steps, discount_factor)
else:
transition_arrays = self.backend.get_transitions(indexes)
priority_weight = self.priority_weight.value(batch_info['progress'])
# Normalize by sum of all probs
probs = probs / np.array([s.total() for s in self.backend.segment_trees], dtype=float).reshape(1, -1)
capacity = self.backend.current_size
weights = (capacity * probs) ** (-priority_weight)
weights = weights / weights.max(axis=0, keepdims=True)
transition_arrays['weights'] = weights
transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()}
transitions = Trajectories(
num_steps=indexes.shape[0],
num_envs=indexes.shape[1],
environment_information=None,
transition_tensors=transition_tensors,
rollout_tensors={},
extra_data={
'tree_idxs': tree_idxs
}
)
return transitions.to_transitions() |
def safe_write(filename, blob):
"""
A two-step write.
:param filename: full path
:param blob: binary data
:return: None
"""
temp_file = filename + '.saving'
with open(temp_file, 'bw') as f:
f.write(blob)
os.rename(temp_file, filename) | A two-step write.
:param filename: full path
:param blob: binary data
:return: None | Below is the instruction that describes the task:
### Input:
A two-step write.
:param filename: full path
:param blob: binary data
:return: None
### Response:
def safe_write(filename, blob):
"""
A two-step write.
:param filename: full path
:param blob: binary data
:return: None
"""
temp_file = filename + '.saving'
with open(temp_file, 'bw') as f:
f.write(blob)
os.rename(temp_file, filename) |
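A short usage sketch for `safe_write` (path and payload are made up). The two-step pattern leans on `os.rename`, which on POSIX replaces an existing target atomically when source and destination live on the same filesystem:

```python
payload = b"\x00\x01\x02\x03"
safe_write("/tmp/example.bin", payload)  # writes example.bin.saving, then renames
```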
def get_FEC(molecule_list, temperature, pressure, electronic_energy='Default'):
"""Returns the Gibbs free energy corrections to be added to raw reaction energies.
Parameters
----------
molecule_list : list of strings
temperature : numeric
temperature in K
pressure : numeric
pressure in mbar
Returns
-------
G_H, G_OH : Gibbs free energy of proton and hydroxide.
"""
if not temperature or not pressure:
return(0)
else:
molecule_list = [m for m in molecule_list if m != 'star']
# print(molecule_list)
FEC_sum = []
for molecule in molecule_list:
if 'gas' in molecule:
mol = GasMolecule(molecule.replace('gas', ''))
if pressure == 'Default':
p = mol.pressure
else:
p = pressure
if electronic_energy == 'Default':
ee = mol.electronic_energy
else:
ee = electronic_energy
FEC = mol.get_free_energy(temperature=temperature, pressure=p, electronic_energy = ee)
FEC_sum.append(FEC)
if 'star' in molecule:
FEC = Adsorbate(molecule.replace('star', ''))
FEC = FEC.get_helmholtz_energy(temperature=temperature)
FEC_sum.append(FEC)
FEC_sum = sum(FEC_sum)
return (FEC_sum) | Returns the Gibbs free energy corrections to be added to raw reaction energies.
Parameters
----------
molecule_list : list of strings
temperature : numeric
temperature in K
pressure : numeric
pressure in mbar
Returns
-------
G_H, G_OH : Gibbs free energy of proton and hydroxide. | Below is the instruction that describes the task:
### Input:
Returns the Gibbs free energy corrections to be added to raw reaction energies.
Parameters
----------
molecule_list : list of strings
temperature : numeric
temperature in K
pressure : numeric
pressure in mbar
Returns
-------
G_H, G_OH : Gibbs free energy of proton and hydroxide.
### Response:
def get_FEC(molecule_list, temperature, pressure, electronic_energy='Default'):
"""Returns the Gibbs free energy corrections to be added to raw reaction energies.
Parameters
----------
molecule_list : list of strings
temperature : numeric
temperature in K
pressure : numeric
pressure in mbar
Returns
-------
G_H, G_OH : Gibbs free energy of proton and hydroxide.
"""
if not temperature or not pressure:
return(0)
else:
molecule_list = [m for m in molecule_list if m != 'star']
# print(molecule_list)
FEC_sum = []
for molecule in molecule_list:
if 'gas' in molecule:
mol = GasMolecule(molecule.replace('gas', ''))
if pressure == 'Default':
p = mol.pressure
else:
p = pressure
if electronic_energy == 'Default':
ee = mol.electronic_energy
else:
ee = electronic_energy
FEC = mol.get_free_energy(temperature=temperature, pressure=p, electronic_energy = ee)
FEC_sum.append(FEC)
if 'star' in molecule:
FEC = Adsorbate(molecule.replace('star', ''))
FEC = FEC.get_helmholtz_energy(temperature=temperature)
FEC_sum.append(FEC)
FEC_sum = sum(FEC_sum)
return (FEC_sum) |
def get_amr_line(input_f):
"""
Read the file containing AMRs. AMRs are separated by a blank line.
Each call of get_amr_line() returns the next available AMR (in one-line form).
Note: this function does not verify if the AMR is valid
"""
cur_amr = []
has_content = False
for line in input_f:
line = line.strip()
if line == "":
if not has_content:
# empty lines before current AMR
continue
else:
# end of current AMR
break
if line.strip().startswith("#"):
# ignore the comment line (starting with "#") in the AMR file
continue
else:
has_content = True
cur_amr.append(line.strip())
return "".join(cur_amr) | Read the file containing AMRs. AMRs are separated by a blank line.
Each call of get_amr_line() returns the next available AMR (in one-line form).
Note: this function does not verify if the AMR is valid | Below is the instruction that describes the task:
### Input:
Read the file containing AMRs. AMRs are separated by a blank line.
Each call of get_amr_line() returns the next available AMR (in one-line form).
Note: this function does not verify if the AMR is valid
### Response:
def get_amr_line(input_f):
"""
Read the file containing AMRs. AMRs are separated by a blank line.
Each call of get_amr_line() returns the next available AMR (in one-line form).
Note: this function does not verify if the AMR is valid
"""
cur_amr = []
has_content = False
for line in input_f:
line = line.strip()
if line == "":
if not has_content:
# empty lines before current AMR
continue
else:
# end of current AMR
break
if line.strip().startswith("#"):
# ignore the comment line (starting with "#") in the AMR file
continue
else:
has_content = True
cur_amr.append(line.strip())
return "".join(cur_amr) |
def _maybe_replace_path(self, match):
""" Regex replacement method that will sub paths when needed """
path = match.group(0)
if self._should_replace(path):
return self._replace_path(path)
else:
        return path | Regex replacement method that will sub paths when needed | Below is the instruction that describes the task:
### Input:
Regex replacement method that will sub paths when needed
### Response:
def _maybe_replace_path(self, match):
""" Regex replacement method that will sub paths when needed """
path = match.group(0)
if self._should_replace(path):
return self._replace_path(path)
else:
return path |
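The method above is written as an re.sub callback; a self-contained toy sketch of that wiring, using an invented stand-in class (PathRewriter, _should_replace and _replace_path here are made up, not the original implementations):
import re

class PathRewriter:
    # invented stand-ins for the real _should_replace/_replace_path
    def _should_replace(self, path):
        return path.startswith("/old/")

    def _replace_path(self, path):
        return "/new/" + path[len("/old/"):]

    def _maybe_replace_path(self, match):
        path = match.group(0)
        return self._replace_path(path) if self._should_replace(path) else path

    def rewrite(self, text):
        # every path-looking token is offered to the callback, which decides whether to substitute it
        return re.sub(r"/\S+", self._maybe_replace_path, text)

print(PathRewriter().rewrite("see /old/lib and /usr/lib"))   # -> see /new/lib and /usr/lib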
def _ctypes_out(parameter):
"""Returns a parameter variable declaration for an output variable for the specified
parameter.
"""
if (parameter.dimension is not None and ":" in parameter.dimension
and "out" in parameter.direction and ("allocatable" in parameter.modifiers or
"pointer" in parameter.modifiers)):
if parameter.direction == "(inout)":
return ("type(C_PTR), intent(inout) :: {}_o".format(parameter.name), True)
else: #self.direction == "(out)" since that is the only other option.
return ("type(C_PTR), intent(inout) :: {}_c".format(parameter.name), True) | Returns a parameter variable declaration for an output variable for the specified
parameter. | Below is the instruction that describes the task:
### Input:
Returns a parameter variable declaration for an output variable for the specified
parameter.
### Response:
def _ctypes_out(parameter):
"""Returns a parameter variable declaration for an output variable for the specified
parameter.
"""
if (parameter.dimension is not None and ":" in parameter.dimension
and "out" in parameter.direction and ("allocatable" in parameter.modifiers or
"pointer" in parameter.modifiers)):
if parameter.direction == "(inout)":
return ("type(C_PTR), intent(inout) :: {}_o".format(parameter.name), True)
else: #self.direction == "(out)" since that is the only other option.
return ("type(C_PTR), intent(inout) :: {}_c".format(parameter.name), True) |
def _index_entities(self):
''' Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
'''
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()])
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)} | Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents. | Below is the instruction that describes the task:
### Input:
Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
### Response:
def _index_entities(self):
''' Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
'''
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()])
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)} |
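The core of _index_entities is the step that keeps only entity columns constant across all variables; a standalone pandas sketch of just that step, with invented entity dicts standing in for v.entities:
import pandas as pd

entities = [
    {"subject": "01", "run": 1, "task": "rest"},
    {"subject": "01", "run": 2, "task": "rest"},
    {"subject": "01", "run": 3, "task": "rest"},
]

all_ents = pd.DataFrame.from_records(entities)
constant = all_ents.apply(lambda x: x.nunique() == 1)                    # True only for single-valued columns
common = {k: all_ents[k].dropna().iloc[0] for k in all_ents.columns[constant]}
print(common)   # {'subject': '01', 'task': 'rest'}; 'run' varies across rows, so it is dropped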
def resolve_objects(cls, objects, skip_cached_urls=False):
"""
Make sure all AnyUrlValue objects from a set of objects is resolved in bulk.
This avoids making a query per item.
:param objects: A list or queryset of models.
:param skip_cached_urls: Whether to avoid prefetching data that has it's URL cached.
"""
# Allow the queryset or list to consist of multiple models.
# This supports querysets from django-polymorphic too.
queryset = list(objects)
any_url_values = []
for obj in queryset:
model = obj.__class__
for field in _any_url_fields_by_model[model]:
any_url_value = getattr(obj, field)
if any_url_value and any_url_value.url_type.has_id_value:
any_url_values.append(any_url_value)
AnyUrlValue.resolve_values(any_url_values, skip_cached_urls=skip_cached_urls) | Make sure all AnyUrlValue objects from a set of objects is resolved in bulk.
This avoids making a query per item.
:param objects: A list or queryset of models.
:param skip_cached_urls: Whether to avoid prefetching data that has it's URL cached. | Below is the instruction that describes the task:
### Input:
Make sure all AnyUrlValue objects from a set of objects is resolved in bulk.
This avoids making a query per item.
:param objects: A list or queryset of models.
:param skip_cached_urls: Whether to avoid prefetching data that has it's URL cached.
### Response:
def resolve_objects(cls, objects, skip_cached_urls=False):
"""
Make sure all AnyUrlValue objects from a set of objects is resolved in bulk.
This avoids making a query per item.
:param objects: A list or queryset of models.
:param skip_cached_urls: Whether to avoid prefetching data that has it's URL cached.
"""
# Allow the queryset or list to consist of multiple models.
# This supports querysets from django-polymorphic too.
queryset = list(objects)
any_url_values = []
for obj in queryset:
model = obj.__class__
for field in _any_url_fields_by_model[model]:
any_url_value = getattr(obj, field)
if any_url_value and any_url_value.url_type.has_id_value:
any_url_values.append(any_url_value)
AnyUrlValue.resolve_values(any_url_values, skip_cached_urls=skip_cached_urls) |
def solve_buffer(self, addr, nbytes, constrain=False):
"""
Reads `nbytes` of symbolic data from a buffer in memory at `addr` and attempts to
concretize it
:param int address: Address of buffer to concretize
:param int nbytes: Size of buffer to concretize
:param bool constrain: If True, constrain the buffer to the concretized value
:return: Concrete contents of buffer
:rtype: list[int]
"""
buffer = self.cpu.read_bytes(addr, nbytes)
result = []
with self._constraints as temp_cs:
cs_to_use = self.constraints if constrain else temp_cs
for c in buffer:
result.append(self._solver.get_value(cs_to_use, c))
cs_to_use.add(c == result[-1])
return result | Reads `nbytes` of symbolic data from a buffer in memory at `addr` and attempts to
concretize it
:param int address: Address of buffer to concretize
:param int nbytes: Size of buffer to concretize
:param bool constrain: If True, constrain the buffer to the concretized value
:return: Concrete contents of buffer
:rtype: list[int] | Below is the instruction that describes the task:
### Input:
Reads `nbytes` of symbolic data from a buffer in memory at `addr` and attempts to
concretize it
:param int address: Address of buffer to concretize
:param int nbytes: Size of buffer to concretize
:param bool constrain: If True, constrain the buffer to the concretized value
:return: Concrete contents of buffer
:rtype: list[int]
### Response:
def solve_buffer(self, addr, nbytes, constrain=False):
"""
Reads `nbytes` of symbolic data from a buffer in memory at `addr` and attempts to
concretize it
:param int address: Address of buffer to concretize
:param int nbytes: Size of buffer to concretize
:param bool constrain: If True, constrain the buffer to the concretized value
:return: Concrete contents of buffer
:rtype: list[int]
"""
buffer = self.cpu.read_bytes(addr, nbytes)
result = []
with self._constraints as temp_cs:
cs_to_use = self.constraints if constrain else temp_cs
for c in buffer:
result.append(self._solver.get_value(cs_to_use, c))
cs_to_use.add(c == result[-1])
return result |
async def send_ssh_job_info(self, job_id: BackendJobId, host: str, port: int, key: str):
"""
Send info about the SSH debug connection to the backend/client. Must be called *at most once* for each job.
:exception JobNotRunningException: is raised when the job is not running anymore (send_job_result already called)
:exception TooManyCallsException: is raised when this function has been called more than once
"""
if job_id not in self.__running_job:
raise JobNotRunningException()
if self.__running_job[job_id]:
raise TooManyCallsException()
self.__running_job[job_id] = True # now we have sent ssh info
await ZMQUtils.send(self.__backend_socket, AgentJobSSHDebug(job_id, host, port, key)) | Send info about the SSH debug connection to the backend/client. Must be called *at most once* for each job.
:exception JobNotRunningException: is raised when the job is not running anymore (send_job_result already called)
:exception TooManyCallsException: is raised when this function has been called more than once | Below is the instruction that describes the task:
### Input:
Send info about the SSH debug connection to the backend/client. Must be called *at most once* for each job.
:exception JobNotRunningException: is raised when the job is not running anymore (send_job_result already called)
:exception TooManyCallsException: is raised when this function has been called more than once
### Response:
async def send_ssh_job_info(self, job_id: BackendJobId, host: str, port: int, key: str):
"""
Send info about the SSH debug connection to the backend/client. Must be called *at most once* for each job.
:exception JobNotRunningException: is raised when the job is not running anymore (send_job_result already called)
:exception TooManyCallsException: is raised when this function has been called more than once
"""
if job_id not in self.__running_job:
raise JobNotRunningException()
if self.__running_job[job_id]:
raise TooManyCallsException()
self.__running_job[job_id] = True # now we have sent ssh info
await ZMQUtils.send(self.__backend_socket, AgentJobSSHDebug(job_id, host, port, key)) |
def getMajorMinor(deviceName, dmsetupLs):
"""
Given output of dmsetup ls this will return
themajor:minor (block name) of the device deviceName
"""
startingIndex = string.rindex(dmsetupLs, deviceName) + len(deviceName)
endingIndex = string.index(dmsetupLs[startingIndex:], "\n") + startingIndex
# trim the preceding tab and ()'s
newStr = dmsetupLs[startingIndex + 2: endingIndex - 1]
return newStr | Given output of dmsetup ls this will return
themajor:minor (block name) of the device deviceName | Below is the instruction that describes the task:
### Input:
Given output of dmsetup ls this will return
themajor:minor (block name) of the device deviceName
### Response:
def getMajorMinor(deviceName, dmsetupLs):
"""
Given output of dmsetup ls this will return
themajor:minor (block name) of the device deviceName
"""
startingIndex = string.rindex(dmsetupLs, deviceName) + len(deviceName)
endingIndex = string.index(dmsetupLs[startingIndex:], "\n") + startingIndex
# trim the preceding tab and ()'s
newStr = dmsetupLs[startingIndex + 2: endingIndex - 1]
return newStr |
def update(self):
"""Request an updated set of data from casper.jxml."""
response = self.jss.session.post(self.url, data=self.auth)
response_xml = ElementTree.fromstring(response.text.encode("utf_8"))
# Remove previous data, if any, and then add in response's XML.
self.clear()
for child in response_xml.getchildren():
        self.append(child) | Request an updated set of data from casper.jxml. | Below is the instruction that describes the task:
### Input:
Request an updated set of data from casper.jxml.
### Response:
def update(self):
"""Request an updated set of data from casper.jxml."""
response = self.jss.session.post(self.url, data=self.auth)
response_xml = ElementTree.fromstring(response.text.encode("utf_8"))
# Remove previous data, if any, and then add in response's XML.
self.clear()
for child in response_xml.getchildren():
self.append(child) |
def postinit(self, targets=None, value=None, type_annotation=None):
"""Do some setup after initialisation.
:param targets: What is being assigned to.
:type targets: list(NodeNG) or None
:param value: The value being assigned to the variables.
:type: NodeNG or None
"""
self.targets = targets
self.value = value
self.type_annotation = type_annotation | Do some setup after initialisation.
:param targets: What is being assigned to.
:type targets: list(NodeNG) or None
:param value: The value being assigned to the variables.
:type: NodeNG or None | Below is the instruction that describes the task:
### Input:
Do some setup after initialisation.
:param targets: What is being assigned to.
:type targets: list(NodeNG) or None
:param value: The value being assigned to the variables.
:type: NodeNG or None
### Response:
def postinit(self, targets=None, value=None, type_annotation=None):
"""Do some setup after initialisation.
:param targets: What is being assigned to.
:type targets: list(NodeNG) or None
:param value: The value being assigned to the variables.
:type: NodeNG or None
"""
self.targets = targets
self.value = value
self.type_annotation = type_annotation |
def rem_or(self, start, end, instr, target=None, include_beyond_target=False):
"""
Find all <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list with indexes to them or [] if none found.
"""
assert(start >= 0 and end <= len(self.code) and start <= end)
try: None in instr
except: instr = [instr]
instr_offsets = []
for i in self.op_range(start, end):
op = self.code[i]
if op in instr:
if target is None:
instr_offsets.append(i)
else:
t = self.get_target(i, op)
if include_beyond_target and t >= target:
instr_offsets.append(i)
elif t == target:
instr_offsets.append(i)
pjits = self.all_instr(start, end, self.opc.PJIT)
filtered = []
for pjit in pjits:
tgt = self.get_target(pjit)-3
for i in instr_offsets:
if i <= pjit or i >= tgt:
filtered.append(i)
instr_offsets = filtered
filtered = []
return instr_offsets | Find all <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list with indexes to them or [] if none found. | Below is the instruction that describes the task:
### Input:
Find all <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list with indexes to them or [] if none found.
### Response:
def rem_or(self, start, end, instr, target=None, include_beyond_target=False):
"""
Find all <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list with indexes to them or [] if none found.
"""
assert(start >= 0 and end <= len(self.code) and start <= end)
try: None in instr
except: instr = [instr]
instr_offsets = []
for i in self.op_range(start, end):
op = self.code[i]
if op in instr:
if target is None:
instr_offsets.append(i)
else:
t = self.get_target(i, op)
if include_beyond_target and t >= target:
instr_offsets.append(i)
elif t == target:
instr_offsets.append(i)
pjits = self.all_instr(start, end, self.opc.PJIT)
filtered = []
for pjit in pjits:
tgt = self.get_target(pjit)-3
for i in instr_offsets:
if i <= pjit or i >= tgt:
filtered.append(i)
instr_offsets = filtered
filtered = []
return instr_offsets |
def _type_size(ty):
""" Calculate `static` type size """
if ty[0] in ('int', 'uint', 'bytesM', 'function'):
return 32
elif ty[0] in ('tuple'):
result = 0
for ty_i in ty[1]:
result += ABI._type_size(ty_i)
return result
elif ty[0] in ('array'):
rep = ty[1]
result = 32 # offset link
return result
elif ty[0] in ('bytes', 'string'):
result = 32 # offset link
return result
    raise ValueError | Calculate `static` type size | Below is the instruction that describes the task:
### Input:
Calculate `static` type size
### Response:
def _type_size(ty):
""" Calculate `static` type size """
if ty[0] in ('int', 'uint', 'bytesM', 'function'):
return 32
elif ty[0] in ('tuple'):
result = 0
for ty_i in ty[1]:
result += ABI._type_size(ty_i)
return result
elif ty[0] in ('array'):
rep = ty[1]
result = 32 # offset link
return result
elif ty[0] in ('bytes', 'string'):
result = 32 # offset link
return result
raise ValueError |
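A quick worked check of what _type_size returns, assuming it is reachable as ABI._type_size (the recursive call in the body suggests a static method on an ABI class); the type descriptors follow the tuple shape the code pattern-matches on:
# static head sizes: every static type occupies one 32-byte slot, dynamic types count as a 32-byte offset link
assert ABI._type_size(('uint', 256)) == 32
assert ABI._type_size(('bytes',)) == 32
assert ABI._type_size(('tuple', [('uint', 256), ('bytes',)])) == 64   # 32 + 32, summed over the members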
def get_environment(id=None, name=None):
"""
Get a specific Environment by name or ID
"""
data = get_environment_raw(id, name)
if data:
        return utils.format_json(data) | Get a specific Environment by name or ID | Below is the instruction that describes the task:
### Input:
Get a specific Environment by name or ID
### Response:
def get_environment(id=None, name=None):
"""
Get a specific Environment by name or ID
"""
data = get_environment_raw(id, name)
if data:
return utils.format_json(data) |
def _compile_proto(full_path, dest):
'Helper to compile protobuf files'
proto_path = os.path.dirname(full_path)
protoc_args = [find_protoc(),
'--python_out={}'.format(dest),
'--proto_path={}'.format(proto_path),
full_path]
proc = subprocess.Popen(protoc_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
outs, errs = proc.communicate(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
return False
if proc.returncode != 0:
msg = 'Failed compiling "{}": \n\nstderr: {}\nstdout: {}'.format(
full_path, errs.decode('utf-8'), outs.decode('utf-8'))
raise BadProtobuf(msg)
    return True | Helper to compile protobuf files | Below is the instruction that describes the task:
### Input:
Helper to compile protobuf files
### Response:
def _compile_proto(full_path, dest):
'Helper to compile protobuf files'
proto_path = os.path.dirname(full_path)
protoc_args = [find_protoc(),
'--python_out={}'.format(dest),
'--proto_path={}'.format(proto_path),
full_path]
proc = subprocess.Popen(protoc_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
outs, errs = proc.communicate(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
return False
if proc.returncode != 0:
msg = 'Failed compiling "{}": \n\nstderr: {}\nstdout: {}'.format(
full_path, errs.decode('utf-8'), outs.decode('utf-8'))
raise BadProtobuf(msg)
return True |
def permute(self, qubits: Qubits) -> 'Gate':
"""Permute the order of the qubits"""
vec = self.vec.permute(qubits)
    return Gate(vec.tensor, qubits=vec.qubits) | Permute the order of the qubits | Below is the instruction that describes the task:
### Input:
Permute the order of the qubits
### Response:
def permute(self, qubits: Qubits) -> 'Gate':
"""Permute the order of the qubits"""
vec = self.vec.permute(qubits)
return Gate(vec.tensor, qubits=vec.qubits) |
def remove_role_from_user(self, user, role):
""" Removes role from user """
user.remove_role(role)
self.save(user)
    events.user_lost_role_event.send(user, role=role) | Removes role from user | Below is the instruction that describes the task:
### Input:
Removes role from user
### Response:
def remove_role_from_user(self, user, role):
""" Removes role from user """
user.remove_role(role)
self.save(user)
events.user_lost_role_event.send(user, role=role) |
def tabulate(
obj,
v_level_indexes=None,
h_level_indexes=None,
v_level_visibility=None,
h_level_visibility=None,
v_level_sort_keys=None,
h_level_sort_keys=None,
v_level_titles=None,
h_level_titles=None,
empty="",
):
"""Render a nested data structure into a two-dimensional table.
Args:
obj: The indexable data structure to be rendered, which can
either be a non-string sequence or a mapping containing other
sequences and mappings nested to arbitrarily many levels,
with all the leaf items (which are neither sequences nor
mappings, excluding strings).
v_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the vertical axis of the table. Taken together
with the levels in h_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both v_level_indexes and
h_level_indexes, but all level indexes must appear in
either v_level_indexes or h_level_indexes. If None,
the levels not used in h_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
h_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the horizontal axis of the table. Taken together
with the levels in v_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both h_level_indexes and
v_level_indexes, but all level indexes must appear in
either h_level_indexes or v_level_indexes. If None,
the levels not used in v_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
v_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in v_level_indexes, and
controls whether than level of index is included in
the table stub columns. This iterable must contain
the same number of items as v_level_indexes.
h_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in h_level_indexes, and
controls whether than level of index is included in
the table header rows. This iterable must contain
the same number of items as h_level_indexes.
v_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
h_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
v_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in v_level_indexes,
and which will be displayed against the row keys for that level.
If None, no titles will be included.
h_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in h_level_indexes,
and which will be displayed against the column keys for that level.
If None, no titles will be included.
empty: An optional string value to use for empty cells.
Returns:
A list of lists representing the rows of cells.
Example:
tabulate(dict_of_dicts, [0, 1], [])
"""
level_keys = breadth_first(obj)
v_level_indexes, h_level_indexes = validate_level_indexes(
len(level_keys), v_level_indexes, h_level_indexes
)
if v_level_visibility is None:
v_level_visibility = [True] * len(v_level_indexes)
if h_level_visibility is None:
h_level_visibility = [True] * len(h_level_indexes)
table, v_key_tuples, h_key_tuples = tabulate_body(
obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
)
table, v_key_tuples = strip_missing_rows(table, v_key_tuples)
table, h_key_tuples = strip_missing_columns(table, h_key_tuples)
v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)
h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)
return assemble_table(
table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty
) | Render a nested data structure into a two-dimensional table.
Args:
obj: The indexable data structure to be rendered, which can
either be a non-string sequence or a mapping containing other
sequences and mappings nested to arbitrarily many levels,
with all the leaf items (which are neither sequences nor
mappings, excluding strings).
v_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the vertical axis of the table. Taken together
with the levels in h_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both v_level_indexes and
h_level_indexes, but all level indexes must appear in
either v_level_indexes or h_level_indexes. If None,
the levels not used in h_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
h_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the horizontal axis of the table. Taken together
with the levels in v_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both h_level_indexes and
v_level_indexes, but all level indexes must appear in
either h_level_indexes or v_level_indexes. If None,
the levels not used in v_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
v_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in v_level_indexes, and
controls whether than level of index is included in
the table stub columns. This iterable must contain
the same number of items as v_level_indexes.
h_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in h_level_indexes, and
controls whether than level of index is included in
the table header rows. This iterable must contain
the same number of items as h_level_indexes.
v_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
h_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
v_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in v_level_indexes,
and which will be displayed against the row keys for that level.
If None, no titles will be included.
h_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in h_level_indexes,
and which will be displayed against the column keys for that level.
If None, no titles will be included.
empty: An optional string value to use for empty cells.
Returns:
A list of lists representing the rows of cells.
Example:
tabulate(dict_of_dicts, [0, 1], []) | Below is the instruction that describes the task:
### Input:
Render a nested data structure into a two-dimensional table.
Args:
obj: The indexable data structure to be rendered, which can
either be a non-string sequence or a mapping containing other
sequences and mappings nested to arbitrarily many levels,
with all the leaf items (which are neither sequences nor
mappings, excluding strings).
v_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the vertical axis of the table. Taken together
with the levels in h_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both v_level_indexes and
h_level_indexes, but all level indexes must appear in
either v_level_indexes or h_level_indexes. If None,
the levels not used in h_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
h_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the horizontal axis of the table. Taken together
with the levels in v_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both h_level_indexes and
v_level_indexes, but all level indexes must appear in
either h_level_indexes or v_level_indexes. If None,
the levels not used in v_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
v_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in v_level_indexes, and
controls whether than level of index is included in
the table stub columns. This iterable must contain
the same number of items as v_level_indexes.
h_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in h_level_indexes, and
controls whether than level of index is included in
the table header rows. This iterable must contain
the same number of items as h_level_indexes.
v_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
h_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
v_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in v_level_indexes,
and which will be displayed against the row keys for that level.
If None, no titles will be included.
h_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in h_level_indexes,
and which will be displayed against the column keys for that level.
If None, no titles will be included.
empty: An optional string value to use for empty cells.
Returns:
A list of lists representing the rows of cells.
Example:
tabulate(dict_of_dicts, [0, 1], [])
### Response:
def tabulate(
obj,
v_level_indexes=None,
h_level_indexes=None,
v_level_visibility=None,
h_level_visibility=None,
v_level_sort_keys=None,
h_level_sort_keys=None,
v_level_titles=None,
h_level_titles=None,
empty="",
):
"""Render a nested data structure into a two-dimensional table.
Args:
obj: The indexable data structure to be rendered, which can
either be a non-string sequence or a mapping containing other
sequences and mappings nested to arbitrarily many levels,
with all the leaf items (which are neither sequences nor
mappings, excluding strings).
v_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the vertical axis of the table. Taken together
with the levels in h_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both v_level_indexes and
h_level_indexes, but all level indexes must appear in
either v_level_indexes or h_level_indexes. If None,
the levels not used in h_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
h_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the horizontal axis of the table. Taken together
with the levels in v_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both h_level_indexes and
v_level_indexes, but all level indexes must appear in
either h_level_indexes or v_level_indexes. If None,
the levels not used in v_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
v_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in v_level_indexes, and
controls whether than level of index is included in
the table stub columns. This iterable must contain
the same number of items as v_level_indexes.
h_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in h_level_indexes, and
controls whether than level of index is included in
the table header rows. This iterable must contain
the same number of items as h_level_indexes.
v_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
h_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
v_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in v_level_indexes,
and which will be displayed against the row keys for that level.
If None, no titles will be included.
h_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in h_level_indexes,
and which will be displayed against the column keys for that level.
If None, no titles will be included.
empty: An optional string value to use for empty cells.
Returns:
A list of lists representing the rows of cells.
Example:
tabulate(dict_of_dicts, [0, 1], [])
"""
level_keys = breadth_first(obj)
v_level_indexes, h_level_indexes = validate_level_indexes(
len(level_keys), v_level_indexes, h_level_indexes
)
if v_level_visibility is None:
v_level_visibility = [True] * len(v_level_indexes)
if h_level_visibility is None:
h_level_visibility = [True] * len(h_level_indexes)
table, v_key_tuples, h_key_tuples = tabulate_body(
obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
)
table, v_key_tuples = strip_missing_rows(table, v_key_tuples)
table, h_key_tuples = strip_missing_columns(table, h_key_tuples)
v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)
h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)
return assemble_table(
table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty
) |
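A hedged usage sketch for tabulate, assuming the module that defines it (together with helpers such as breadth_first and assemble_table, which are not shown in this record) is importable; the nested dict is invented:
sales = {
    "Oslo":   {"Q1": 10, "Q2": 12},
    "Bergen": {"Q1": 7,  "Q2": 9},
}

# level 0 (city) down the rows, level 1 (quarter) across the columns
rows = tabulate(sales, v_level_indexes=[0], h_level_indexes=[1])
for row in rows:
    print(row)   # each row is a list of cells, with header/stub keys included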
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: a integer specifying number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
"""
# node_states is initially a tensor with shape [B, N, D]. The call to dense
# creates a D x K kernel that serves as a fully-connected layer.
#
# For each possible batch b and node n in the first two dimensions of
# node_states, the corresponding size-D vector (the third dimension of
# node_states) is the hidden state for node n in batch b. Each of these size-D
# vectors is multiplied by the kernel to produce an attention query of size K.
# The result is a tensor of size [B, N, K] containing the attention queries
# for each node in each batch.
q = common_layers.dense(
node_states, total_key_depth, use_bias=False, name="q_mpnn")
# Creates the attention keys in a manner similar to the process of creating
# the attention queries. One key is created for each type of outgoing edge the
# corresponding node might have, meaning k will have shape [B, N, K*T].
k = _compute_edge_transforms(node_states,
total_key_depth,
num_transforms,
name="k_mpnn")
v = _compute_edge_transforms(node_states,
total_value_depth,
num_transforms,
name="v_mpnn")
return q, k, v | Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: a integer specifying number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]). | Below is the instruction that describes the task:
### Input:
Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: a integer specifying number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
### Response:
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: a integer specifying number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
"""
# node_states is initially a tensor with shape [B, N, D]. The call to dense
# creates a D x K kernel that serves as a fully-connected layer.
#
# For each possible batch b and node n in the first two dimensions of
# node_states, the corresponding size-D vector (the third dimension of
# node_states) is the hidden state for node n in batch b. Each of these size-D
# vectors is multiplied by the kernel to produce an attention query of size K.
# The result is a tensor of size [B, N, K] containing the attention queries
# for each node in each batch.
q = common_layers.dense(
node_states, total_key_depth, use_bias=False, name="q_mpnn")
# Creates the attention keys in a manner similar to the process of creating
# the attention queries. One key is created for each type of outgoing edge the
# corresponding node might have, meaning k will have shape [B, N, K*T].
k = _compute_edge_transforms(node_states,
total_key_depth,
num_transforms,
name="k_mpnn")
v = _compute_edge_transforms(node_states,
total_value_depth,
num_transforms,
name="v_mpnn")
return q, k, v |
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm} | This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals()) | Below is the instruction that describes the task:
### Input:
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
### Response:
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm} |
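A minimal, hedged sketch of calling function_info from inside a method, assuming its module (including the func_frame helper it depends on, which is not shown here) is imported; the exact values depend on the caller's frame, so none are asserted:
class Greeter:
    def hello(self):
        info = function_info()   # default function_index=1 describes hello() itself
        return info["class_name"], info["function_name"], info["line_number"]

print(Greeter().hello())   # expected to look roughly like ('Greeter', 'hello', <line number of the call>)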
def nasm_null_safe_mutable_data_finalizer(env, code, data):
"""
Simple data allocation strategy that expects the code to be in a writable
segment. We just append the data to the end of the code.
"""
if data or env.buffers:
# Determine length of nullify + shellcode and adjust data pointer
xor_offsets = []
masked_data = OrderedDict()
for datum, (offset, orig_datum) in six.iteritems(data):
xor_offsets.extend([
offset + i
for i, b in enumerate(six.iterbytes(datum))
if b in (0, 10, 13)
])
masked_datum = b''.join([
six.int2byte(b) if b not in (0, 10, 13)
else six.int2byte(b ^ 0xff)
for b in six.iterbytes(datum)
])
masked_data[masked_datum] = (offset, orig_datum)
if xor_offsets:
# Build code to restore NUL, \r and \n
temp_reg = env.TEMP_REG[env.target.bits]
null_code = env.reg_load(env.BL, 255) + \
env.reg_load(temp_reg, env.OFFSET_REG)
last_offset = 0
for offset in xor_offsets:
offset -= last_offset
null_code.extend(
env.reg_add(temp_reg, offset) +
['xor [%s], bl' % temp_reg]
)
last_offset += offset
code = ['\t%s' % line for line in null_code] + code
data = masked_data
code_len = len(asm('\n'.join(code), target=env.target))
adjust_ebp = env.reg_add(env.OFFSET_REG, code_len)
return [
'\tjmp __getpc1',
'__getpc0:',
'\tpop %s' % env.OFFSET_REG,
] + [
'\t%s' % line for line in adjust_ebp
] + [
'\tjmp __realstart',
'__getpc1:',
'\tcall __getpc0',
'__realstart:',
] + code + _pack_data(data)
else:
return code | Simple data allocation strategy that expects the code to be in a writable
segment. We just append the data to the end of the code. | Below is the instruction that describes the task:
### Input:
Simple data allocation strategy that expects the code to be in a writable
segment. We just append the data to the end of the code.
### Response:
def nasm_null_safe_mutable_data_finalizer(env, code, data):
"""
Simple data allocation strategy that expects the code to be in a writable
segment. We just append the data to the end of the code.
"""
if data or env.buffers:
# Determine length of nullify + shellcode and adjust data pointer
xor_offsets = []
masked_data = OrderedDict()
for datum, (offset, orig_datum) in six.iteritems(data):
xor_offsets.extend([
offset + i
for i, b in enumerate(six.iterbytes(datum))
if b in (0, 10, 13)
])
masked_datum = b''.join([
six.int2byte(b) if b not in (0, 10, 13)
else six.int2byte(b ^ 0xff)
for b in six.iterbytes(datum)
])
masked_data[masked_datum] = (offset, orig_datum)
if xor_offsets:
# Build code to restore NUL, \r and \n
temp_reg = env.TEMP_REG[env.target.bits]
null_code = env.reg_load(env.BL, 255) + \
env.reg_load(temp_reg, env.OFFSET_REG)
last_offset = 0
for offset in xor_offsets:
offset -= last_offset
null_code.extend(
env.reg_add(temp_reg, offset) +
['xor [%s], bl' % temp_reg]
)
last_offset += offset
code = ['\t%s' % line for line in null_code] + code
data = masked_data
code_len = len(asm('\n'.join(code), target=env.target))
adjust_ebp = env.reg_add(env.OFFSET_REG, code_len)
return [
'\tjmp __getpc1',
'__getpc0:',
'\tpop %s' % env.OFFSET_REG,
] + [
'\t%s' % line for line in adjust_ebp
] + [
'\tjmp __realstart',
'__getpc1:',
'\tcall __getpc0',
'__realstart:',
] + code + _pack_data(data)
else:
return code |
def datetime_to_year_quarter(dt):
"""
Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter
"""
year = dt.year
quarter = int(math.ceil(float(dt.month)/3))
return (year, quarter) | Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter | Below is the instruction that describes the task:
### Input:
Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter
### Response:
def datetime_to_year_quarter(dt):
"""
Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter
"""
year = dt.year
quarter = int(math.ceil(float(dt.month)/3))
return (year, quarter) |
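A tiny usage check for datetime_to_year_quarter, assuming the function and the math import its module needs are in scope:
from datetime import datetime

assert datetime_to_year_quarter(datetime(2024, 2, 14)) == (2024, 1)    # February falls in Q1
assert datetime_to_year_quarter(datetime(2024, 8, 15)) == (2024, 3)    # August: ceil(8 / 3) == 3
assert datetime_to_year_quarter(datetime(2024, 12, 31)) == (2024, 4)   # December falls in Q4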
def __allocate_neuron_patterns(self, start_iteration, stop_iteration):
"""!
@brief Allocates observation transposed matrix of neurons that is limited by specified periods of simulation.
@details Matrix where state of each neuron is denoted by zero/one in line with Heaviside function on each iteration.
@return (list) Transposed observation matrix that is limited by specified periods of simulation.
"""
pattern_matrix = []
for index_neuron in range(len(self.output[0])):
pattern_neuron = []
for iteration in range(start_iteration, stop_iteration):
pattern_neuron.append(heaviside(self.output[iteration][index_neuron]))
pattern_matrix.append(pattern_neuron)
return pattern_matrix | !
@brief Allocates observation transposed matrix of neurons that is limited by specified periods of simulation.
@details Matrix where state of each neuron is denoted by zero/one in line with Heaviside function on each iteration.
@return (list) Transposed observation matrix that is limited by specified periods of simulation. | Below is the instruction that describes the task:
### Input:
!
@brief Allocates observation transposed matrix of neurons that is limited by specified periods of simulation.
@details Matrix where state of each neuron is denoted by zero/one in line with Heaviside function on each iteration.
@return (list) Transposed observation matrix that is limited by specified periods of simulation.
### Response:
def __allocate_neuron_patterns(self, start_iteration, stop_iteration):
"""!
@brief Allocates observation transposed matrix of neurons that is limited by specified periods of simulation.
@details Matrix where state of each neuron is denoted by zero/one in line with Heaviside function on each iteration.
@return (list) Transposed observation matrix that is limited by specified periods of simulation.
"""
pattern_matrix = []
for index_neuron in range(len(self.output[0])):
pattern_neuron = []
for iteration in range(start_iteration, stop_iteration):
pattern_neuron.append(heaviside(self.output[iteration][index_neuron]))
pattern_matrix.append(pattern_neuron)
return pattern_matrix |
def resume_session_logging(self):
"""Resume session logging."""
self._chain.ctrl.set_session_log(self.session_fd)
    self.log("Session logging resumed") | Resume session logging. | Below is the instruction that describes the task:
### Input:
Resume session logging.
### Response:
def resume_session_logging(self):
"""Resume session logging."""
self._chain.ctrl.set_session_log(self.session_fd)
self.log("Session logging resumed") |
def page(self, status=values.unset, iccid=values.unset, rate_plan=values.unset,
e_id=values.unset, sim_registration_code=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SimInstance records from the API.
Request is executed immediately
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimPage
"""
params = values.of({
'Status': status,
'Iccid': iccid,
'RatePlan': rate_plan,
'EId': e_id,
'SimRegistrationCode': sim_registration_code,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SimPage(self._version, response, self._solution) | Retrieve a single page of SimInstance records from the API.
Request is executed immediately
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimPage | Below is the the instruction that describes the task:
### Input:
Retrieve a single page of SimInstance records from the API.
Request is executed immediately
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimPage
### Response:
def page(self, status=values.unset, iccid=values.unset, rate_plan=values.unset,
e_id=values.unset, sim_registration_code=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SimInstance records from the API.
Request is executed immediately
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimPage
"""
params = values.of({
'Status': status,
'Iccid': iccid,
'RatePlan': rate_plan,
'EId': e_id,
'SimRegistrationCode': sim_registration_code,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SimPage(self._version, response, self._solution) |
def render_traceback(self, excid=None):
"""render one or all of my tracebacks to a list of lines"""
lines = []
if excid is None:
for (en,ev,etb,ei) in self.elist:
lines.append(self._get_engine_str(ei))
lines.extend((etb or 'No traceback available').splitlines())
lines.append('')
else:
try:
en,ev,etb,ei = self.elist[excid]
except:
raise IndexError("an exception with index %i does not exist"%excid)
else:
lines.append(self._get_engine_str(ei))
lines.extend((etb or 'No traceback available').splitlines())
return lines | render one or all of my tracebacks to a list of lines | Below is the the instruction that describes the task:
### Input:
render one or all of my tracebacks to a list of lines
### Response:
def render_traceback(self, excid=None):
"""render one or all of my tracebacks to a list of lines"""
lines = []
if excid is None:
for (en,ev,etb,ei) in self.elist:
lines.append(self._get_engine_str(ei))
lines.extend((etb or 'No traceback available').splitlines())
lines.append('')
else:
try:
en,ev,etb,ei = self.elist[excid]
except:
raise IndexError("an exception with index %i does not exist"%excid)
else:
lines.append(self._get_engine_str(ei))
lines.extend((etb or 'No traceback available').splitlines())
return lines |
def resolve_parameter_refs(self, input_dict, parameters):
"""
Substitute references found within the string of `Fn::Sub` intrinsic function
:param input_dict: Dictionary representing the Fn::Sub function. Must contain only one key and it should be
`Fn::Sub`. Ex: {"Fn::Sub": ...}
:param parameters: Dictionary of parameter values for substitution
:return: Resolved
"""
def do_replacement(full_ref, prop_name):
"""
Replace parameter references with the actual value. The return value of this method directly replaces the
reference structure
:param full_ref: => ${logicalId.property}
:param prop_name: => logicalId.property
:return: Either the value it resolves to. If not the original reference
"""
return parameters.get(prop_name, full_ref)
return self._handle_sub_action(input_dict, do_replacement) | Substitute references found within the string of `Fn::Sub` intrinsic function
:param input_dict: Dictionary representing the Fn::Sub function. Must contain only one key and it should be
`Fn::Sub`. Ex: {"Fn::Sub": ...}
:param parameters: Dictionary of parameter values for substitution
:return: Resolved | Below is the the instruction that describes the task:
### Input:
Substitute references found within the string of `Fn::Sub` intrinsic function
:param input_dict: Dictionary representing the Fn::Sub function. Must contain only one key and it should be
`Fn::Sub`. Ex: {"Fn::Sub": ...}
:param parameters: Dictionary of parameter values for substitution
:return: Resolved
### Response:
def resolve_parameter_refs(self, input_dict, parameters):
"""
Substitute references found within the string of `Fn::Sub` intrinsic function
:param input_dict: Dictionary representing the Fn::Sub function. Must contain only one key and it should be
`Fn::Sub`. Ex: {"Fn::Sub": ...}
:param parameters: Dictionary of parameter values for substitution
:return: Resolved
"""
def do_replacement(full_ref, prop_name):
"""
Replace parameter references with the actual value. The return value of this method directly replaces the
reference structure
:param full_ref: => ${logicalId.property}
:param prop_name: => logicalId.property
:return: Either the value it resolves to. If not the original reference
"""
return parameters.get(prop_name, full_ref)
return self._handle_sub_action(input_dict, do_replacement) |
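A simplified, standalone sketch of the substitution idea behind do_replacement: each ${name} reference in a plain string is swapped for its value from the parameters dict, and unknown references are left untouched. The substitute_refs helper and its regex are invented here for illustration; they are not the library's _handle_sub_action, which also walks nested Fn::Sub structures.

import re

def substitute_refs(template_string, parameters):
    # Replace ${prop_name} with its parameter value; keep the original reference if unknown.
    def do_replacement(match):
        full_ref, prop_name = match.group(0), match.group(1)
        return str(parameters.get(prop_name, full_ref))
    return re.sub(r'\$\{([^}]+)\}', do_replacement, template_string)

print(substitute_refs("arn:aws:s3:::${BucketName}/${Unknown}", {"BucketName": "my-bucket"}))
# arn:aws:s3:::my-bucket/${Unknown}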
def log_init(level):
"""Set up a logger that catches all channels and logs it to stdout.
This is used to set up logging when testing.
"""
log = logging.getLogger()
hdlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
log.setLevel(level) | Set up a logger that catches all channels and logs it to stdout.
This is used to set up logging when testing. | Below is the the instruction that describes the task:
### Input:
Set up a logger that catches all channels and logs it to stdout.
This is used to set up logging when testing.
### Response:
def log_init(level):
"""Set up a logger that catches all channels and logs it to stdout.
This is used to set up logging when testing.
"""
log = logging.getLogger()
hdlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
log.setLevel(level) |
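A short usage sketch with log_init restated so it runs standalone: after the call, records at or above the chosen level from any logger reach the stream handler in the timestamped format configured above.

import logging

def log_init(level):
    log = logging.getLogger()
    hdlr = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    log.setLevel(level)

log_init(logging.DEBUG)
logging.getLogger("tests.example").debug("debug output is now visible")
logging.getLogger("tests.example").warning("so are higher-severity records")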
def create(self, validated_data):
"""
Perform the enrollment for existing enterprise customer users, or create the pending objects for new users.
"""
enterprise_customer = self.context.get('enterprise_customer')
lms_user = validated_data.get('lms_user_id')
tpa_user = validated_data.get('tpa_user_id')
user_email = validated_data.get('user_email')
course_run_id = validated_data.get('course_run_id')
course_mode = validated_data.get('course_mode')
cohort = validated_data.get('cohort')
email_students = validated_data.get('email_students')
is_active = validated_data.get('is_active')
enterprise_customer_user = lms_user or tpa_user or user_email
if isinstance(enterprise_customer_user, models.EnterpriseCustomerUser):
validated_data['enterprise_customer_user'] = enterprise_customer_user
try:
if is_active:
enterprise_customer_user.enroll(course_run_id, course_mode, cohort=cohort)
else:
enterprise_customer_user.unenroll(course_run_id)
except (CourseEnrollmentDowngradeError, CourseEnrollmentPermissionError, HttpClientError) as exc:
validated_data['detail'] = str(exc)
return validated_data
if is_active:
track_enrollment('enterprise-customer-enrollment-api', enterprise_customer_user.user_id, course_run_id)
else:
if is_active:
enterprise_customer_user = enterprise_customer.enroll_user_pending_registration(
user_email,
course_mode,
course_run_id,
cohort=cohort
)
else:
enterprise_customer.clear_pending_registration(user_email, course_run_id)
if email_students:
enterprise_customer.notify_enrolled_learners(
self.context.get('request_user'),
course_run_id,
[enterprise_customer_user]
)
validated_data['detail'] = 'success'
return validated_data | Perform the enrollment for existing enterprise customer users, or create the pending objects for new users. | Below is the the instruction that describes the task:
### Input:
Perform the enrollment for existing enterprise customer users, or create the pending objects for new users.
### Response:
def create(self, validated_data):
"""
Perform the enrollment for existing enterprise customer users, or create the pending objects for new users.
"""
enterprise_customer = self.context.get('enterprise_customer')
lms_user = validated_data.get('lms_user_id')
tpa_user = validated_data.get('tpa_user_id')
user_email = validated_data.get('user_email')
course_run_id = validated_data.get('course_run_id')
course_mode = validated_data.get('course_mode')
cohort = validated_data.get('cohort')
email_students = validated_data.get('email_students')
is_active = validated_data.get('is_active')
enterprise_customer_user = lms_user or tpa_user or user_email
if isinstance(enterprise_customer_user, models.EnterpriseCustomerUser):
validated_data['enterprise_customer_user'] = enterprise_customer_user
try:
if is_active:
enterprise_customer_user.enroll(course_run_id, course_mode, cohort=cohort)
else:
enterprise_customer_user.unenroll(course_run_id)
except (CourseEnrollmentDowngradeError, CourseEnrollmentPermissionError, HttpClientError) as exc:
validated_data['detail'] = str(exc)
return validated_data
if is_active:
track_enrollment('enterprise-customer-enrollment-api', enterprise_customer_user.user_id, course_run_id)
else:
if is_active:
enterprise_customer_user = enterprise_customer.enroll_user_pending_registration(
user_email,
course_mode,
course_run_id,
cohort=cohort
)
else:
enterprise_customer.clear_pending_registration(user_email, course_run_id)
if email_students:
enterprise_customer.notify_enrolled_learners(
self.context.get('request_user'),
course_run_id,
[enterprise_customer_user]
)
validated_data['detail'] = 'success'
return validated_data |
def get_metric(self, slug):
"""Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year.
"""
results = OrderedDict()
granularities = self._granularities()
keys = self._build_keys(slug)
for granularity, key in zip(granularities, keys):
results[granularity] = self.r.get(key)
return results | Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year. | Below is the the instruction that describes the task:
### Input:
Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year.
### Response:
def get_metric(self, slug):
"""Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year.
"""
results = OrderedDict()
granularities = self._granularities()
keys = self._build_keys(slug)
for granularity, key in zip(granularities, keys):
results[granularity] = self.r.get(key)
return results |
def _init_polling(self):
"""
Bootstrap polling for throttler.
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll.
"""
with self.lock:
if not self.running:
return
r = random.Random()
delay = r.random() * self.refresh_interval
self.channel.io_loop.call_later(
delay=delay, callback=self._delayed_polling)
self.logger.info(
'Delaying throttling credit polling by %d sec', delay) | Bootstrap polling for throttler.
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll. | Below is the the instruction that describes the task:
### Input:
Bootstrap polling for throttler.
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll.
### Response:
def _init_polling(self):
"""
Bootstrap polling for throttler.
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll.
"""
with self.lock:
if not self.running:
return
r = random.Random()
delay = r.random() * self.refresh_interval
self.channel.io_loop.call_later(
delay=delay, callback=self._delayed_polling)
self.logger.info(
'Delaying throttling credit polling by %d sec', delay) |
def _aix_cpudata():
'''
Return CPU information for AIX systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('cpuarch', r'(?im)^\s*Processor\s+Type:\s+(\S+)'),
('cpu_flags', r'(?im)^\s*Processor\s+Version:\s+(\S+)'),
('cpu_model', r'(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)'),
('num_cpus', r'(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains | Return CPU information for AIX systems | Below is the the instruction that describes the task:
### Input:
Return CPU information for AIX systems
### Response:
def _aix_cpudata():
'''
Return CPU information for AIX systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('cpuarch', r'(?im)^\s*Processor\s+Type:\s+(\S+)'),
('cpu_flags', r'(?im)^\s*Processor\s+Version:\s+(\S+)'),
('cpu_model', r'(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)'),
('num_cpus', r'(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains |
def team_scores(self, team_scores, time, show_datetime, use_12_hour_format):
"""Prints the teams scores in a pretty format"""
for score in team_scores["matches"]:
if score["status"] == "FINISHED":
click.secho("%s\t" % score["utcDate"].split('T')[0],
fg=self.colors.TIME, nl=False)
self.scores(self.parse_result(score))
elif show_datetime:
self.scores(self.parse_result(score), add_new_line=False)
click.secho(' %s' % Stdout.utc_to_local(score["utcDate"],
use_12_hour_format,
show_datetime),
fg=self.colors.TIME) | Prints the teams scores in a pretty format | Below is the the instruction that describes the task:
### Input:
Prints the teams scores in a pretty format
### Response:
def team_scores(self, team_scores, time, show_datetime, use_12_hour_format):
"""Prints the teams scores in a pretty format"""
for score in team_scores["matches"]:
if score["status"] == "FINISHED":
click.secho("%s\t" % score["utcDate"].split('T')[0],
fg=self.colors.TIME, nl=False)
self.scores(self.parse_result(score))
elif show_datetime:
self.scores(self.parse_result(score), add_new_line=False)
click.secho(' %s' % Stdout.utc_to_local(score["utcDate"],
use_12_hour_format,
show_datetime),
fg=self.colors.TIME) |
def _parse_dependencies(string):
"""
This function actually parses the dependencies and sorts them into
the buildable and given dependencies
"""
contents = _get_contents_between(string, '(', ')')
unsorted_dependencies = contents.split(',')
_check_parameters(unsorted_dependencies, ('?',))
buildable_dependencies = []
given_dependencies = []
for dependency in unsorted_dependencies:
if dependency[0] == '?':
given_dependencies.append(dependency[1:])
else:
buildable_dependencies.append(dependency)
string = string[string.index(')') + 1:]
return buildable_dependencies, given_dependencies, string | This function actually parses the dependencies and sorts them into
the buildable and given dependencies | Below is the the instruction that describes the task:
### Input:
This function actually parses the dependencies and sorts them into
the buildable and given dependencies
### Response:
def _parse_dependencies(string):
"""
This function actually parses the dependencies and sorts them into
the buildable and given dependencies
"""
contents = _get_contents_between(string, '(', ')')
unsorted_dependencies = contents.split(',')
_check_parameters(unsorted_dependencies, ('?',))
buildable_dependencies = []
given_dependencies = []
for dependency in unsorted_dependencies:
if dependency[0] == '?':
given_dependencies.append(dependency[1:])
else:
buildable_dependencies.append(dependency)
string = string[string.index(')') + 1:]
return buildable_dependencies, given_dependencies, string |
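A hedged, standalone sketch of the splitting behaviour: entries prefixed with '?' become given dependencies, the rest buildable ones, and the text after the closing parenthesis is returned unchanged. The _get_contents_between stand-in and the sample string are invented for illustration and skip the package's real helpers and parameter checks.

def _get_contents_between(string, start, end):
    return string[string.index(start) + 1:string.index(end)]

def split_dependencies(string):
    contents = _get_contents_between(string, '(', ')')
    buildable, given = [], []
    for dependency in contents.split(','):
        if dependency.startswith('?'):
            given.append(dependency[1:])
        else:
            buildable.append(dependency)
    return buildable, given, string[string.index(')') + 1:]

print(split_dependencies("(alpha,?beta,gamma) -> target"))
# (['alpha', 'gamma'], ['beta'], ' -> target')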
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm') | Below is the the instruction that describes the task:
### Input:
Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
### Response:
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) |
def parse_line(line):
"""
Parses a byte string like:
PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n
to a `ProxyInfo`.
"""
if not line.startswith(b'PROXY'):
raise exc.InvalidLine('Missing "PROXY" prefix', line)
if not line.endswith(CRLF):
raise exc.InvalidLine('Missing "\\r\\n" terminal', line)
parts = line[:-len(CRLF)].split(b' ')
if len(parts) != 6:
raise exc.InvalidLine('Expected 6 " " delimited parts', line)
inet, src_addr, dst_addr = parts[1:4]
if inet == b'TCP4':
try:
socket.inet_pton(socket.AF_INET, src_addr)
socket.inet_pton(socket.AF_INET, dst_addr)
except socket.error:
raise exc.InvalidLine('Invalid INET {0} address(es)'.format(inet), line)
elif inet == b'TCP6':
try:
socket.inet_pton(socket.AF_INET6, src_addr)
socket.inet_pton(socket.AF_INET6, dst_addr)
except socket.error:
raise exc.InvalidLine('Invalid INET {0} address(es)'.format(inet), line)
else:
raise exc.InvalidLine('Unsupported INET "{0}"'.format(inet), line)
try:
src_port = int(parts[4])
dst_port = int(parts[5])
except (TypeError, ValueError):
raise exc.InvalidLine(line, 'Invalid port')
return ProxyInfo(
source_address=src_addr,
source_port=src_port,
destination_address=dst_addr,
destination_port=dst_port,
) | Parses a byte string like:
PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n
to a `ProxyInfo`. | Below is the the instruction that describes the task:
### Input:
Parses a byte string like:
PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n
to a `ProxyInfo`.
### Response:
def parse_line(line):
"""
Parses a byte string like:
PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n
to a `ProxyInfo`.
"""
if not line.startswith(b'PROXY'):
raise exc.InvalidLine('Missing "PROXY" prefix', line)
if not line.endswith(CRLF):
raise exc.InvalidLine('Missing "\\r\\n" terminal', line)
parts = line[:-len(CRLF)].split(b' ')
if len(parts) != 6:
raise exc.InvalidLine('Expected 6 " " delimited parts', line)
inet, src_addr, dst_addr = parts[1:4]
if inet == b'TCP4':
try:
socket.inet_pton(socket.AF_INET, src_addr)
socket.inet_pton(socket.AF_INET, dst_addr)
except socket.error:
raise exc.InvalidLine('Invalid INET {0} address(es)'.format(inet), line)
elif inet == b'TCP6':
try:
socket.inet_pton(socket.AF_INET6, src_addr)
socket.inet_pton(socket.AF_INET6, dst_addr)
except socket.error:
raise exc.InvalidLine('Invalid INET {0} address(es)'.format(inet), line)
else:
raise exc.InvalidLine('Unsupported INET "{0}"'.format(inet), line)
try:
src_port = int(parts[4])
dst_port = int(parts[5])
except (TypeError, ValueError):
raise exc.InvalidLine(line, 'Invalid port')
return ProxyInfo(
source_address=src_addr,
source_port=src_port,
destination_address=dst_addr,
destination_port=dst_port,
) |
def run(self, ds, skip_checks, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if skip_checks is not None:
skip_check_dict = CheckSuite._process_skip_checks(skip_checks)
else:
skip_check_dict = defaultdict(lambda: None)
if len(checkers) == 0:
print("No valid checkers found for tests '{}'".format(",".join(checker_names)))
for checker_name, checker_class in checkers:
checker = checker_class() # instantiate a Checker object
checker.setup(ds) # setup method to prep
checks = self._get_checks(checker, skip_check_dict)
vals = []
errs = {} # check method name -> (exc, traceback)
for c, max_level in checks:
try:
vals.extend(self._run_check(c, ds, max_level))
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2])
# score the results we got back
groups = self.scores(vals)
ret_val[checker_name] = groups, errs
return ret_val | Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks. | Below is the the instruction that describes the task:
### Input:
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
### Response:
def run(self, ds, skip_checks, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if skip_checks is not None:
skip_check_dict = CheckSuite._process_skip_checks(skip_checks)
else:
skip_check_dict = defaultdict(lambda: None)
if len(checkers) == 0:
print("No valid checkers found for tests '{}'".format(",".join(checker_names)))
for checker_name, checker_class in checkers:
checker = checker_class() # instantiate a Checker object
checker.setup(ds) # setup method to prep
checks = self._get_checks(checker, skip_check_dict)
vals = []
errs = {} # check method name -> (exc, traceback)
for c, max_level in checks:
try:
vals.extend(self._run_check(c, ds, max_level))
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2])
# score the results we got back
groups = self.scores(vals)
ret_val[checker_name] = groups, errs
return ret_val |
def get_symbol(network, num_classes, from_layers, num_filters, sizes, ratios,
strides, pads, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out | Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol | Below is the the instruction that describes the task:
### Input:
Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
### Response:
def get_symbol(network, num_classes, from_layers, num_filters, sizes, ratios,
strides, pads, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out |
def new_task(func):
"""
Runs the decorated function in a new task
"""
@wraps(func)
async def wrapper(self, *args, **kwargs):
loop = get_event_loop()
loop.create_task(func(self, *args, **kwargs))
return wrapper | Runs the decorated function in a new task | Below is the the instruction that describes the task:
### Input:
Runs the decorated function in a new task
### Response:
def new_task(func):
"""
Runs the decorated function in a new task
"""
@wraps(func)
async def wrapper(self, *args, **kwargs):
loop = get_event_loop()
loop.create_task(func(self, *args, **kwargs))
return wrapper |
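A standalone usage sketch, assuming the decorator is meant for async methods called while an event loop is running: the await returns immediately and the wrapped coroutine runs as a separate task. The Worker class and sleep intervals are invented for illustration.

import asyncio
from functools import wraps

def new_task(func):
    @wraps(func)
    async def wrapper(self, *args, **kwargs):
        loop = asyncio.get_event_loop()
        loop.create_task(func(self, *args, **kwargs))
    return wrapper

class Worker:
    @new_task
    async def ping(self, n):
        await asyncio.sleep(0)
        print("ping", n)

async def main():
    await Worker().ping(1)    # returns right away; the real work is scheduled as a task
    await asyncio.sleep(0.1)  # give the scheduled task a chance to run

asyncio.run(main())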
def send_invoice_email(self, invoice_id, email_dict):
"""
Sends an invoice by email
If you want to send your email to more than one person, do:
'recipients': {'to': ['[email protected]', '[email protected]']}}
:param invoice_id: the invoice id
:param email_dict: the email dict
:return dict
"""
return self._create_post_request(
resource=INVOICES,
billomat_id=invoice_id,
send_data=email_dict,
command=EMAIL,
) | Sends an invoice by email
If you want to send your email to more than one person, do:
'recipients': {'to': ['[email protected]', '[email protected]']}}
:param invoice_id: the invoice id
:param email_dict: the email dict
:return dict | Below is the the instruction that describes the task:
### Input:
Sends an invoice by email
If you want to send your email to more than one person, do:
'recipients': {'to': ['[email protected]', '[email protected]']}}
:param invoice_id: the invoice id
:param email_dict: the email dict
:return dict
### Response:
def send_invoice_email(self, invoice_id, email_dict):
"""
Sends an invoice by email
If you want to send your email to more than one person, do:
'recipients': {'to': ['[email protected]', '[email protected]']}}
:param invoice_id: the invoice id
:param email_dict: the email dict
:return dict
"""
return self._create_post_request(
resource=INVOICES,
billomat_id=invoice_id,
send_data=email_dict,
command=EMAIL,
) |
def _parse_argv(argv=copy(sys.argv)):
"""return argv as a parsed dictionary, looks like the following:
app --option1 likethis --option2 likethat --flag
->
{'option1': 'likethis', 'option2': 'likethat', 'flag': True}
"""
cfg = DotDict()
cfg_files = []
argv = argv[1:] # Skip command name
while argv:
arg = argv.pop(0)
# split up arg in format --arg=val
key_val = re.split('=| ', arg)
arg = key_val[0]
try:
val = key_val[1]
except IndexError:
if len(argv) > 0 and argv[0][0] != '-':
val = argv.pop(0)
else:
# No val available, probably a flag
val = None
if arg[0] == '-':
key = arg.lstrip('-')
if not val:
val = True
new_cfg = _dict_from_dotted(key, val)
cfg = dict_merge(cfg, new_cfg)
else:
if arg.endswith(".yml"):
cfg_files.append(arg)
return cfg, cfg_files | return argv as a parsed dictionary, looks like the following:
app --option1 likethis --option2 likethat --flag
->
{'option1': 'likethis', 'option2': 'likethat', 'flag': True} | Below is the the instruction that describes the task:
### Input:
return argv as a parsed dictionary, looks like the following:
app --option1 likethis --option2 likethat --flag
->
{'option1': 'likethis', 'option2': 'likethat', 'flag': True}
### Response:
def _parse_argv(argv=copy(sys.argv)):
"""return argv as a parsed dictionary, looks like the following:
app --option1 likethis --option2 likethat --flag
->
{'option1': 'likethis', 'option2': 'likethat', 'flag': True}
"""
cfg = DotDict()
cfg_files = []
argv = argv[1:] # Skip command name
while argv:
arg = argv.pop(0)
# split up arg in format --arg=val
key_val = re.split('=| ', arg)
arg = key_val[0]
try:
val = key_val[1]
except IndexError:
if len(argv) > 0 and argv[0][0] != '-':
val = argv.pop(0)
else:
# No val available, probably a flag
val = None
if arg[0] == '-':
key = arg.lstrip('-')
if not val:
val = True
new_cfg = _dict_from_dotted(key, val)
cfg = dict_merge(cfg, new_cfg)
else:
if arg.endswith(".yml"):
cfg_files.append(arg)
return cfg, cfg_files |
def brown(num_points=1024, b2=1.0, fs=1.0):
""" Brownian or random walk (diffusion) noise with 1/f^2 PSD
(not really a color... rather Brownian or random-walk)
N = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency
we integrate white-noise to get Brownian noise.
"""
return (1.0/float(fs))*numpy.cumsum(white(num_points, b0=b2*(4.0*math.pi*math.pi), fs=fs)) | Brownian or random walk (diffusion) noise with 1/f^2 PSD
(not really a color... rather Brownian or random-walk)
N = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency
we integrate white-noise to get Brownian noise. | Below is the the instruction that describes the task:
### Input:
Brownian or random walk (diffusion) noise with 1/f^2 PSD
(not really a color... rather Brownian or random-walk)
N = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency
we integrate white-noise to get Brownian noise.
### Response:
def brown(num_points=1024, b2=1.0, fs=1.0):
""" Brownian or random walk (diffusion) noise with 1/f^2 PSD
(not really a color... rather Brownian or random-walk)
N = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency
we integrate white-noise to get Brownian noise.
"""
return (1.0/float(fs))*numpy.cumsum(white(num_points, b0=b2*(4.0*math.pi*math.pi), fs=fs)) |
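A standalone sketch of the same idea without the library's white() helper: Brownian noise is the cumulative sum of white noise, scaled by the sampling interval. The white-noise variance below assumes the usual convention that a one-sided PSD of b0 corresponds to sample variance b0*fs/2; treat it as an illustration rather than the package's exact scaling.

import math
import numpy

def brown_sketch(num_points=1024, b2=1.0, fs=1.0):
    b0 = b2 * (4.0 * math.pi * math.pi)
    # Gaussian white noise with variance b0*fs/2 (assumed convention, see note above)
    white_noise = numpy.random.randn(num_points) * math.sqrt(b0 * fs / 2.0)
    # integrating (cumulatively summing) the white noise yields the random walk
    return (1.0 / float(fs)) * numpy.cumsum(white_noise)

samples = brown_sketch(num_points=4096)
print(samples.shape, samples[:3])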
def feature_types(self):
"""Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str]
"""
r = self.session.query(distinct(models.Feature.type_)).all()
return [x[0] for x in r] | Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str] | Below is the the instruction that describes the task:
### Input:
Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str]
### Response:
def feature_types(self):
"""Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str]
"""
r = self.session.query(distinct(models.Feature.type_)).all()
return [x[0] for x in r] |
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output | Convert LogEvent object to a dictionary. | Below is the the instruction that describes the task:
### Input:
Convert LogEvent object to a dictionary.
### Response:
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output |
def get(tree, name):
""" Return a float value attribute NAME from TREE.
"""
if name in tree:
value = tree[name]
else:
return float("nan")
try:
a = float(value)
except ValueError:
a = float("nan")
return a | Return a float value attribute NAME from TREE. | Below is the the instruction that describes the task:
### Input:
Return a float value attribute NAME from TREE.
### Response:
def get(tree, name):
""" Return a float value attribute NAME from TREE.
"""
if name in tree:
value = tree[name]
else:
return float("nan")
try:
a = float(value)
except ValueError:
a = float("nan")
return a |
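A usage sketch with the function restated so it runs standalone; the point is that missing or non-numeric attributes come back as NaN instead of raising. The sample tree dict is invented for illustration.

def get(tree, name):
    if name in tree:
        value = tree[name]
    else:
        return float("nan")
    try:
        a = float(value)
    except ValueError:
        a = float("nan")
    return a

tree = {"height": "12.5", "label": "oak"}
print(get(tree, "height"))  # 12.5
print(get(tree, "label"))   # nan: not parseable as a float
print(get(tree, "width"))   # nan: missing attribute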
def _generate_time_steps(self, trajectory_list):
"""A generator to yield single time-steps from a list of trajectories."""
for single_trajectory in trajectory_list:
assert isinstance(single_trajectory, trajectory.Trajectory)
# Skip writing trajectories that have only a single time-step -- this
# could just be a repeated reset.
if single_trajectory.num_time_steps <= 1:
continue
for index, time_step in enumerate(single_trajectory.time_steps):
# The first time-step doesn't have reward/processed_reward, if so, just
# setting it to 0.0 / 0 should be OK.
raw_reward = time_step.raw_reward
if not raw_reward:
raw_reward = 0.0
processed_reward = time_step.processed_reward
if not processed_reward:
processed_reward = 0
action = time_step.action
if action is None:
# The last time-step doesn't have action, and this action shouldn't be
# used, gym's spaces have a `sample` function, so let's just sample an
# action and use that.
action = self.action_space.sample()
action = gym_spaces_utils.gym_space_encode(self.action_space, action)
if six.PY3:
# py3 complains that, to_example cannot handle np.int64 !
action_dtype = self.action_space.dtype
if action_dtype in [np.int64, np.int32]:
action = list(map(int, action))
elif action_dtype in [np.float64, np.float32]:
action = list(map(float, action))
# same with processed_reward.
processed_reward = int(processed_reward)
assert time_step.observation is not None
yield {
TIMESTEP_FIELD: [index],
ACTION_FIELD:
action,
# to_example errors on np.float32
RAW_REWARD_FIELD: [float(raw_reward)],
PROCESSED_REWARD_FIELD: [processed_reward],
# to_example doesn't know bools
DONE_FIELD: [int(time_step.done)],
OBSERVATION_FIELD:
gym_spaces_utils.gym_space_encode(self.observation_space,
time_step.observation),
} | A generator to yield single time-steps from a list of trajectories. | Below is the the instruction that describes the task:
### Input:
A generator to yield single time-steps from a list of trajectories.
### Response:
def _generate_time_steps(self, trajectory_list):
"""A generator to yield single time-steps from a list of trajectories."""
for single_trajectory in trajectory_list:
assert isinstance(single_trajectory, trajectory.Trajectory)
# Skip writing trajectories that have only a single time-step -- this
# could just be a repeated reset.
if single_trajectory.num_time_steps <= 1:
continue
for index, time_step in enumerate(single_trajectory.time_steps):
# The first time-step doesn't have reward/processed_reward, if so, just
# setting it to 0.0 / 0 should be OK.
raw_reward = time_step.raw_reward
if not raw_reward:
raw_reward = 0.0
processed_reward = time_step.processed_reward
if not processed_reward:
processed_reward = 0
action = time_step.action
if action is None:
# The last time-step doesn't have action, and this action shouldn't be
# used, gym's spaces have a `sample` function, so let's just sample an
# action and use that.
action = self.action_space.sample()
action = gym_spaces_utils.gym_space_encode(self.action_space, action)
if six.PY3:
# py3 complains that, to_example cannot handle np.int64 !
action_dtype = self.action_space.dtype
if action_dtype in [np.int64, np.int32]:
action = list(map(int, action))
elif action_dtype in [np.float64, np.float32]:
action = list(map(float, action))
# same with processed_reward.
processed_reward = int(processed_reward)
assert time_step.observation is not None
yield {
TIMESTEP_FIELD: [index],
ACTION_FIELD:
action,
# to_example errors on np.float32
RAW_REWARD_FIELD: [float(raw_reward)],
PROCESSED_REWARD_FIELD: [processed_reward],
# to_example doesn't know bools
DONE_FIELD: [int(time_step.done)],
OBSERVATION_FIELD:
gym_spaces_utils.gym_space_encode(self.observation_space,
time_step.observation),
} |
def set_schedule(self, zone_info):
"""Sets the schedule for this zone"""
# must only POST json, otherwise server API handler raises exceptions
try:
json.loads(zone_info)
except ValueError as error:
raise ValueError("zone_info must be valid JSON: ", error)
headers = dict(self.client._headers()) # pylint: disable=protected-access
headers['Content-Type'] = 'application/json'
response = requests.put(
"https://tccna.honeywell.com/WebAPI/emea/api/v1"
"/%s/%s/schedule" % (self.zone_type, self.zoneId),
data=zone_info, headers=headers
)
response.raise_for_status()
return response.json() | Sets the schedule for this zone | Below is the the instruction that describes the task:
### Input:
Sets the schedule for this zone
### Response:
def set_schedule(self, zone_info):
"""Sets the schedule for this zone"""
# must only POST json, otherwise server API handler raises exceptions
try:
json.loads(zone_info)
except ValueError as error:
raise ValueError("zone_info must be valid JSON: ", error)
headers = dict(self.client._headers()) # pylint: disable=protected-access
headers['Content-Type'] = 'application/json'
response = requests.put(
"https://tccna.honeywell.com/WebAPI/emea/api/v1"
"/%s/%s/schedule" % (self.zone_type, self.zoneId),
data=zone_info, headers=headers
)
response.raise_for_status()
return response.json() |
def check_hash(path, file_hash):
'''
Check if a file matches the given hash string
Returns ``True`` if the hash matches, otherwise ``False``.
path
Path to a file local to the minion.
hash
The hash to check against the file specified in the ``path`` argument.
.. versionchanged:: 2016.11.4
For this and newer versions the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>=<hash_value>`` (e.g.
``md5=e138491e9d5b97023cea823fe17bac22``).
CLI Example:
.. code-block:: bash
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
'''
path = os.path.expanduser(path)
if not isinstance(file_hash, six.string_types):
raise SaltInvocationError('hash must be a string')
for sep in (':', '='):
if sep in file_hash:
hash_type, hash_value = file_hash.split(sep, 1)
break
else:
hash_value = file_hash
hash_len = len(file_hash)
hash_type = HASHES_REVMAP.get(hash_len)
if hash_type is None:
raise SaltInvocationError(
'Hash {0} (length: {1}) could not be matched to a supported '
'hash type. The supported hash types and lengths are: '
'{2}'.format(
file_hash,
hash_len,
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
return get_hash(path, hash_type) == hash_value | Check if a file matches the given hash string
Returns ``True`` if the hash matches, otherwise ``False``.
path
Path to a file local to the minion.
hash
The hash to check against the file specified in the ``path`` argument.
.. versionchanged:: 2016.11.4
For this and newer versions the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>=<hash_value>`` (e.g.
``md5=e138491e9d5b97023cea823fe17bac22``).
CLI Example:
.. code-block:: bash
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22 | Below is the the instruction that describes the task:
### Input:
Check if a file matches the given hash string
Returns ``True`` if the hash matches, otherwise ``False``.
path
Path to a file local to the minion.
hash
The hash to check against the file specified in the ``path`` argument.
.. versionchanged:: 2016.11.4
For this and newer versions the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>=<hash_value>`` (e.g.
``md5=e138491e9d5b97023cea823fe17bac22``).
CLI Example:
.. code-block:: bash
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
### Response:
def check_hash(path, file_hash):
'''
Check if a file matches the given hash string
Returns ``True`` if the hash matches, otherwise ``False``.
path
Path to a file local to the minion.
hash
The hash to check against the file specified in the ``path`` argument.
.. versionchanged:: 2016.11.4
For this and newer versions the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>=<hash_value>`` (e.g.
``md5=e138491e9d5b97023cea823fe17bac22``).
CLI Example:
.. code-block:: bash
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
'''
path = os.path.expanduser(path)
if not isinstance(file_hash, six.string_types):
raise SaltInvocationError('hash must be a string')
for sep in (':', '='):
if sep in file_hash:
hash_type, hash_value = file_hash.split(sep, 1)
break
else:
hash_value = file_hash
hash_len = len(file_hash)
hash_type = HASHES_REVMAP.get(hash_len)
if hash_type is None:
raise SaltInvocationError(
'Hash {0} (length: {1}) could not be matched to a supported '
'hash type. The supported hash types and lengths are: '
'{2}'.format(
file_hash,
hash_len,
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
return get_hash(path, hash_type) == hash_value |
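The hash-string parsing inside check_hash can be exercised on its own; the HASHES_REVMAP below is a hand-written stand-in keyed by common digest lengths, an assumption rather than Salt's actual table.

import hashlib

HASHES_REVMAP = {32: 'md5', 40: 'sha1', 64: 'sha256', 128: 'sha512'}  # assumed subset

def parse_hash(file_hash):
    # accept '<type>:<value>', '<type>=<value>', or a bare digest whose length is known
    for sep in (':', '='):
        if sep in file_hash:
            return tuple(file_hash.split(sep, 1))
    hash_type = HASHES_REVMAP.get(len(file_hash))
    if hash_type is None:
        raise ValueError('unsupported hash length: {0}'.format(len(file_hash)))
    return hash_type, file_hash

print(parse_hash(hashlib.md5(b'example').hexdigest()))                 # ('md5', '<32-char digest>')
print(parse_hash('sha256=' + hashlib.sha256(b'example').hexdigest()))  # ('sha256', '<64-char digest>')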
def getIteratorSetting(self, login, tableName, iteratorName, scope):
"""
Parameters:
- login
- tableName
- iteratorName
- scope
"""
self.send_getIteratorSetting(login, tableName, iteratorName, scope)
return self.recv_getIteratorSetting() | Parameters:
- login
- tableName
- iteratorName
- scope | Below is the the instruction that describes the task:
### Input:
Parameters:
- login
- tableName
- iteratorName
- scope
### Response:
def getIteratorSetting(self, login, tableName, iteratorName, scope):
"""
Parameters:
- login
- tableName
- iteratorName
- scope
"""
self.send_getIteratorSetting(login, tableName, iteratorName, scope)
return self.recv_getIteratorSetting() |
def first(self, expression, order_expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
"""Return the first element of a binned `expression`, where the values each bin are sorted by `order_expression`.
Example:
>>> import vaex
>>> df = vaex.example()
>>> df.first(df.x, df.y, shape=8)
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977,
8.47446537, -5.73602629, 10.18783 ])
:param expression: The value to be placed in the bin.
:param order_expression: Order the values in the bins by this expression.
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:param edges: {edges}
:return: Ndarray containing the first elements.
:rtype: numpy.array
"""
return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression])
logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits)
logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits)
expression = _ensure_strings_from_expressions(expression)
order_expression = _ensure_string_from_expression(order_expression)
binby = _ensure_strings_from_expressions(binby)
waslist, [expressions,] = vaex.utils.listify(expression)
@delayed
def finish(*counts):
counts = np.asarray(counts)
return vaex.utils.unlistify(waslist, counts)
progressbar = vaex.utils.progressbars(progress)
limits = self.limits(binby, limits, delay=True, shape=shape)
stats = [self._first_calculation(expression, order_expression, binby=binby, limits=limits, shape=shape, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions]
var = finish(*stats)
return self._delay(delay, var) | Return the first element of a binned `expression`, where the values in each bin are sorted by `order_expression`.
Example:
>>> import vaex
>>> df = vaex.example()
>>> df.first(df.x, df.y, shape=8)
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977,
8.47446537, -5.73602629, 10.18783 ])
:param expression: The value to be placed in the bin.
:param order_expression: Order the values in the bins by this expression.
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:param edges: {edges}
:return: Ndarray containing the first elements.
:rtype: numpy.array | Below is the the instruction that describes the task:
### Input:
Return the first element of a binned `expression`, where the values in each bin are sorted by `order_expression`.
Example:
>>> import vaex
>>> df = vaex.example()
>>> df.first(df.x, df.y, shape=8)
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977,
8.47446537, -5.73602629, 10.18783 ])
:param expression: The value to be placed in the bin.
:param order_expression: Order the values in the bins by this expression.
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:param edges: {edges}
:return: Ndarray containing the first elements.
:rtype: numpy.array
### Response:
def first(self, expression, order_expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
"""Return the first element of a binned `expression`, where the values each bin are sorted by `order_expression`.
Example:
>>> import vaex
>>> df = vaex.example()
>>> df.first(df.x, df.y, shape=8)
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
>>> df.first(df.x, df.y, shape=8, binby=[df.y])
array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977,
8.47446537, -5.73602629, 10.18783 ])
:param expression: The value to be placed in the bin.
:param order_expression: Order the values in the bins by this expression.
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:param edges: {edges}
:return: Ndarray containing the first elements.
:rtype: numpy.array
"""
return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression])
logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits)
logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits)
expression = _ensure_strings_from_expressions(expression)
order_expression = _ensure_string_from_expression(order_expression)
binby = _ensure_strings_from_expressions(binby)
waslist, [expressions,] = vaex.utils.listify(expression)
@delayed
def finish(*counts):
counts = np.asarray(counts)
return vaex.utils.unlistify(waslist, counts)
progressbar = vaex.utils.progressbars(progress)
limits = self.limits(binby, limits, delay=True, shape=shape)
stats = [self._first_calculation(expression, order_expression, binby=binby, limits=limits, shape=shape, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions]
var = finish(*stats)
return self._delay(delay, var) |
def primers(self):
"""
Read in the primer file, and create a properly formatted output file that takes any degenerate bases
into account
"""
with open(self.formattedprimers, 'w') as formatted:
for record in SeqIO.parse(self.primerfile, 'fasta'):
# from https://stackoverflow.com/a/27552377 - find any degenerate bases in the primer sequence, and
# create all possibilities as a list
degenerates = Seq.IUPAC.IUPACData.ambiguous_dna_values
try:
primerlist = list(map("".join, product(*map(degenerates.get, str(record.seq)))))
except TypeError:
print("Invalid Primer Sequence: {seq}".format(seq=str(record.seq)))
sys.exit()
# As the record.id is being updated in the loop below, set the name of the primer here so that will
# be able to be recalled when setting the new record.ids
primername = record.id
# Iterate through all the possible primers created from any degenerate bases
for index, primer in enumerate(primerlist):
# Update the primer name with the position in the list to keep the name unique
record.id = primername + '_{index}'.format(index=index)
# Clear the description, as, otherwise, it will be added, and there will be duplicate information
record.description = ''
# Create a seqrecord from the primer sequence
record.seq = Seq.Seq(primer)
# Write the properly-formatted records to file
SeqIO.write(record, formatted, 'fasta')
# Populate a dictionary to store the length of the primers - will be used in determining whether
# BLAST hits are full-length
self.faidict[record.id] = len(str(record.seq))
# Ensure that the kmer length used in the initial baiting is no larger than the shortest primer
if len(str(record.seq)) < self.klength:
self.klength = len(str(record.seq)) | Read in the primer file, and create a properly formatted output file that takes any degenerate bases
into account | Below is the the instruction that describes the task:
### Input:
Read in the primer file, and create a properly formatted output file that takes any degenerate bases
into account
### Response:
def primers(self):
"""
Read in the primer file, and create a properly formatted output file that takes any degenerate bases
into account
"""
with open(self.formattedprimers, 'w') as formatted:
for record in SeqIO.parse(self.primerfile, 'fasta'):
# from https://stackoverflow.com/a/27552377 - find any degenerate bases in the primer sequence, and
# create all possibilities as a list
degenerates = Seq.IUPAC.IUPACData.ambiguous_dna_values
try:
primerlist = list(map("".join, product(*map(degenerates.get, str(record.seq)))))
except TypeError:
print("Invalid Primer Sequence: {seq}".format(seq=str(record.seq)))
sys.exit()
# As the record.id is being updated in the loop below, set the name of the primer here so that will
# be able to be recalled when setting the new record.ids
primername = record.id
# Iterate through all the possible primers created from any degenerate bases
for index, primer in enumerate(primerlist):
# Update the primer name with the position in the list to keep the name unique
record.id = primername + '_{index}'.format(index=index)
# Clear the description, as, otherwise, it will be added, and there will be duplicate information
record.description = ''
# Create a seqrecord from the primer sequence
record.seq = Seq.Seq(primer)
# Write the properly-formatted records to file
SeqIO.write(record, formatted, 'fasta')
# Populate a dictionary to store the length of the primers - will be used in determining whether
# BLAST hits are full-length
self.faidict[record.id] = len(str(record.seq))
# Ensure that the kmer length used in the initial baiting is no larger than the shortest primer
if len(str(record.seq)) < self.klength:
self.klength = len(str(record.seq)) |
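The degenerate-base expansion in primers reduces to itertools.product over the IUPAC ambiguity table; the table below is a hand-written subset and the primer sequence is an arbitrary example, not taken from any real assay.

from itertools import product

degenerates = {'A': 'A', 'C': 'C', 'G': 'G', 'T': 'T', 'R': 'AG', 'Y': 'CT'}  # subset of the IUPAC codes

primer = 'ACGRYT'  # R expands to A/G, Y expands to C/T
variants = list(map(''.join, product(*map(degenerates.get, primer))))
print(variants)  # ['ACGACT', 'ACGATT', 'ACGGCT', 'ACGGTT']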
def collectstatic(settings_module,
bin_env=None,
no_post_process=False,
ignore=None,
dry_run=False,
clear=False,
link=False,
no_default_ignore=False,
pythonpath=None,
env=None,
runas=None):
'''
Collect static files from each of your applications into a single location
that can easily be served in production.
CLI Example:
.. code-block:: bash
salt '*' django.collectstatic <settings_module>
'''
args = ['noinput']
kwargs = {}
if no_post_process:
args.append('no-post-process')
if ignore:
kwargs['ignore'] = ignore
if dry_run:
args.append('dry-run')
if clear:
args.append('clear')
if link:
args.append('link')
if no_default_ignore:
args.append('no-default-ignore')
return command(settings_module,
'collectstatic',
bin_env,
pythonpath,
env,
runas,
*args, **kwargs) | Collect static files from each of your applications into a single location
that can easily be served in production.
CLI Example:
.. code-block:: bash
salt '*' django.collectstatic <settings_module> | Below is the the instruction that describes the task:
### Input:
Collect static files from each of your applications into a single location
that can easily be served in production.
CLI Example:
.. code-block:: bash
salt '*' django.collectstatic <settings_module>
### Response:
def collectstatic(settings_module,
bin_env=None,
no_post_process=False,
ignore=None,
dry_run=False,
clear=False,
link=False,
no_default_ignore=False,
pythonpath=None,
env=None,
runas=None):
'''
Collect static files from each of your applications into a single location
that can easily be served in production.
CLI Example:
.. code-block:: bash
salt '*' django.collectstatic <settings_module>
'''
args = ['noinput']
kwargs = {}
if no_post_process:
args.append('no-post-process')
if ignore:
kwargs['ignore'] = ignore
if dry_run:
args.append('dry-run')
if clear:
args.append('clear')
if link:
args.append('link')
if no_default_ignore:
args.append('no-default-ignore')
return command(settings_module,
'collectstatic',
bin_env,
pythonpath,
env,
runas,
*args, **kwargs) |
def save_to_object(self):
"""Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
"""
tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir)
checkpoint_prefix = self.save(tmpdir)
data = {}
base_dir = os.path.dirname(checkpoint_prefix)
for path in os.listdir(base_dir):
path = os.path.join(base_dir, path)
if path.startswith(checkpoint_prefix):
with open(path, "rb") as f:
data[os.path.basename(path)] = f.read()
out = io.BytesIO()
data_dict = pickle.dumps({
"checkpoint_name": os.path.basename(checkpoint_prefix),
"data": data,
})
if len(data_dict) > 10e6: # getting pretty large
logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
out.write(data_dict)
shutil.rmtree(tmpdir)
return out.getvalue() | Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data. | Below is the the instruction that describes the task:
### Input:
Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
### Response:
def save_to_object(self):
"""Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
"""
tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir)
checkpoint_prefix = self.save(tmpdir)
data = {}
base_dir = os.path.dirname(checkpoint_prefix)
for path in os.listdir(base_dir):
path = os.path.join(base_dir, path)
if path.startswith(checkpoint_prefix):
with open(path, "rb") as f:
data[os.path.basename(path)] = f.read()
out = io.BytesIO()
data_dict = pickle.dumps({
"checkpoint_name": os.path.basename(checkpoint_prefix),
"data": data,
})
if len(data_dict) > 10e6: # getting pretty large
logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
out.write(data_dict)
shutil.rmtree(tmpdir)
return out.getvalue() |
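The serialize/restore round trip in save_to_object boils down to pickling a {checkpoint_name, data} dict; the sketch below uses in-memory byte strings instead of real checkpoint files on disk.

import io
import pickle

data = {"model.ckpt": b"\x00\x01weights", "model.ckpt.meta": b"metadata"}  # fake file contents
out = io.BytesIO()
out.write(pickle.dumps({"checkpoint_name": "model.ckpt", "data": data}))

restored = pickle.loads(out.getvalue())
print(restored["checkpoint_name"])  # model.ckpt
print(sorted(restored["data"]))     # ['model.ckpt', 'model.ckpt.meta']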
def generate_np(self, x, **kwargs):
"""
Generate adversarial images in a for loop.
:param y: An array of shape (n, nb_classes) for true labels.
:param y_target: An array of shape (n, nb_classes) for target labels.
Required for targeted attack.
:param image_target: An array of shape (n, **image shape) for initial
target images. Required for targeted attack.
See parse_params for other kwargs.
"""
x_adv = []
if 'image_target' in kwargs and kwargs['image_target'] is not None:
image_target = np.copy(kwargs['image_target'])
else:
image_target = None
if 'y_target' in kwargs and kwargs['y_target'] is not None:
y_target = np.copy(kwargs['y_target'])
else:
y_target = None
for i, x_single in enumerate(x):
img = np.expand_dims(x_single, axis=0)
if image_target is not None:
single_img_target = np.expand_dims(image_target[i], axis=0)
kwargs['image_target'] = single_img_target
if y_target is not None:
single_y_target = np.expand_dims(y_target[i], axis=0)
kwargs['y_target'] = single_y_target
adv_img = super(BoundaryAttackPlusPlus,
self).generate_np(img, **kwargs)
x_adv.append(adv_img)
return np.concatenate(x_adv, axis=0) | Generate adversarial images in a for loop.
:param y: An array of shape (n, nb_classes) for true labels.
:param y_target: An array of shape (n, nb_classes) for target labels.
Required for targeted attack.
:param image_target: An array of shape (n, **image shape) for initial
target images. Required for targeted attack.
See parse_params for other kwargs. | Below is the the instruction that describes the task:
### Input:
Generate adversarial images in a for loop.
:param y: An array of shape (n, nb_classes) for true labels.
:param y_target: An array of shape (n, nb_classes) for target labels.
Required for targeted attack.
:param image_target: An array of shape (n, **image shape) for initial
target images. Required for targeted attack.
See parse_params for other kwargs.
### Response:
def generate_np(self, x, **kwargs):
"""
Generate adversarial images in a for loop.
:param y: An array of shape (n, nb_classes) for true labels.
:param y_target: An array of shape (n, nb_classes) for target labels.
Required for targeted attack.
:param image_target: An array of shape (n, **image shape) for initial
target images. Required for targeted attack.
See parse_params for other kwargs.
"""
x_adv = []
if 'image_target' in kwargs and kwargs['image_target'] is not None:
image_target = np.copy(kwargs['image_target'])
else:
image_target = None
if 'y_target' in kwargs and kwargs['y_target'] is not None:
y_target = np.copy(kwargs['y_target'])
else:
y_target = None
for i, x_single in enumerate(x):
img = np.expand_dims(x_single, axis=0)
if image_target is not None:
single_img_target = np.expand_dims(image_target[i], axis=0)
kwargs['image_target'] = single_img_target
if y_target is not None:
single_y_target = np.expand_dims(y_target[i], axis=0)
kwargs['y_target'] = single_y_target
adv_img = super(BoundaryAttackPlusPlus,
self).generate_np(img, **kwargs)
x_adv.append(adv_img)
return np.concatenate(x_adv, axis=0) |
def init(**kwargs):
"""Initialize the specified names in the specified databases.
The general process is as follows:
- Ensure the database in question exists
- Ensure all tables exist in the database.
"""
# TODO: Iterate through all engines in name set.
database = kwargs.pop('database', False)
if database and not database_exists(engine['default'].url):
create_database(engine['default'].url, encoding='utf8')
clear_cache()
expression = lambda target, table: table.create(target)
test = lambda target, table: table.exists(target)
op(expression, test=test, primary='init', secondary='create', **kwargs) | Initialize the specified names in the specified databases.
The general process is as follows:
- Ensure the database in question exists
- Ensure all tables exist in the database. | Below is the the instruction that describes the task:
### Input:
Initialize the specified names in the specified databases.
The general process is as follows:
- Ensure the database in question exists
- Ensure all tables exist in the database.
### Response:
def init(**kwargs):
"""Initialize the specified names in the specified databases.
The general process is as follows:
- Ensure the database in question exists
- Ensure all tables exist in the database.
"""
# TODO: Iterate through all engines in name set.
database = kwargs.pop('database', False)
if database and not database_exists(engine['default'].url):
create_database(engine['default'].url, encoding='utf8')
clear_cache()
expression = lambda target, table: table.create(target)
test = lambda target, table: table.exists(target)
op(expression, test=test, primary='init', secondary='create', **kwargs) |
def numbafy(fn, args, compiler="jit", **nbkws):
"""
Compile a string, sympy expression or symengine expression using numba.
Not all functions are supported by Python's numerical package (numpy). For
difficult cases, valid Python code (as string) may be more suitable than
symbolic expressions coming from sympy, symengine, etc. When compiling
vectorized functions, include valid signatures (see `numba`_ documentation).
Args:
fn: Symbolic expression as sympy/symengine expression or string
args (iterable): Symbolic arguments
compiler: String name or callable numba compiler
nbkws: Compiler keyword arguments (if none provided, smart defaults are used)
Returns:
func: Compiled function
Warning:
For vectorized functions, valid signatures are (almost always) required.
"""
kwargs = {} # Numba kwargs to be updated by user
if not isinstance(args, (tuple, list)):
args = (args, )
# Parameterize compiler
if isinstance(compiler, six.string_types):
compiler_ = getattr(nb, compiler, None)
if compiler_ is None:
raise AttributeError("No numba function with name {}.".format(compiler))
compiler = compiler_
if compiler in (nb.jit, nb.njit, nb.autojit):
kwargs.update(jitkwargs)
sig = nbkws.pop("signature", None)
else:
kwargs.update(veckwargs)
sig = nbkws.pop("signatures", None)
if sig is None:
warn("Vectorization without 'signatures' can lead to wrong results!")
kwargs.update(nbkws)
# Expand sympy expressions and create string for eval
if isinstance(fn, sy.Expr):
fn = sy.expand_func(fn)
func = sy.lambdify(args, fn, modules='numpy')
# Machine code compilation
if sig is None:
try:
func = compiler(**kwargs)(func)
except RuntimeError:
kwargs['cache'] = False
func = compiler(**kwargs)(func)
else:
try:
func = compiler(sig, **kwargs)(func)
except RuntimeError:
kwargs['cache'] = False
func = compiler(sig, **kwargs)(func)
return func | Compile a string, sympy expression or symengine expression using numba.
Not all functions are supported by Python's numerical package (numpy). For
difficult cases, valid Python code (as string) may be more suitable than
symbolic expressions coming from sympy, symengine, etc. When compiling
vectorized functions, include valid signatures (see `numba`_ documentation).
Args:
fn: Symbolic expression as sympy/symengine expression or string
args (iterable): Symbolic arguments
compiler: String name or callable numba compiler
nbkws: Compiler keyword arguments (if none provided, smart defaults are used)
Returns:
func: Compiled function
Warning:
For vectorized functions, valid signatures are (almost always) required. | Below is the the instruction that describes the task:
### Input:
Compile a string, sympy expression or symengine expression using numba.
Not all functions are supported by Python's numerical package (numpy). For
difficult cases, valid Python code (as string) may be more suitable than
symbolic expressions coming from sympy, symengine, etc. When compiling
vectorized functions, include valid signatures (see `numba`_ documentation).
Args:
fn: Symbolic expression as sympy/symengine expression or string
args (iterable): Symbolic arguments
compiler: String name or callable numba compiler
nbkws: Compiler keyword arguments (if none provided, smart defaults are used)
Returns:
func: Compiled function
Warning:
For vectorized functions, valid signatures are (almost always) required.
### Response:
def numbafy(fn, args, compiler="jit", **nbkws):
"""
Compile a string, sympy expression or symengine expression using numba.
Not all functions are supported by Python's numerical package (numpy). For
difficult cases, valid Python code (as string) may be more suitable than
symbolic expressions coming from sympy, symengine, etc. When compiling
vectorized functions, include valid signatures (see `numba`_ documentation).
Args:
fn: Symbolic expression as sympy/symengine expression or string
args (iterable): Symbolic arguments
compiler: String name or callable numba compiler
nbkws: Compiler keyword arguments (if none provided, smart defaults are used)
Returns:
func: Compiled function
Warning:
For vectorized functions, valid signatures are (almost always) required.
"""
kwargs = {} # Numba kwargs to be updated by user
if not isinstance(args, (tuple, list)):
args = (args, )
# Parameterize compiler
if isinstance(compiler, six.string_types):
compiler_ = getattr(nb, compiler, None)
if compiler_ is None:
raise AttributeError("No numba function with name {}.".format(compiler))
compiler = compiler_
if compiler in (nb.jit, nb.njit, nb.autojit):
kwargs.update(jitkwargs)
sig = nbkws.pop("signature", None)
else:
kwargs.update(veckwargs)
sig = nbkws.pop("signatures", None)
if sig is None:
warn("Vectorization without 'signatures' can lead to wrong results!")
kwargs.update(nbkws)
# Expand sympy expressions and create string for eval
if isinstance(fn, sy.Expr):
fn = sy.expand_func(fn)
func = sy.lambdify(args, fn, modules='numpy')
# Machine code compilation
if sig is None:
try:
func = compiler(**kwargs)(func)
except RuntimeError:
kwargs['cache'] = False
func = compiler(**kwargs)(func)
else:
try:
func = compiler(sig, **kwargs)(func)
except RuntimeError:
kwargs['cache'] = False
func = compiler(sig, **kwargs)(func)
return func |
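A hypothetical call to numbafy, assuming numba and sympy are installed and that the module-level names it relies on (nb, sy, six, warn, jitkwargs, veckwargs) are in scope; the expression is purely illustrative.

import sympy as sy

x, y = sy.symbols('x y')
f = numbafy(x**2 + sy.sin(y), (x, y), compiler='njit')  # jit path, no signature required
print(f(2.0, 0.0))  # 4.0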
def del_handler(self, handle):
"""
Remove the handle registered for `handle`
:raises KeyError:
The handle wasn't registered.
"""
_, _, _, respondent = self._handle_map.pop(handle)
if respondent:
self._handles_by_respondent[respondent].discard(handle) | Remove the handle registered for `handle`
:raises KeyError:
The handle wasn't registered. | Below is the the instruction that describes the task:
### Input:
Remove the handle registered for `handle`
:raises KeyError:
The handle wasn't registered.
### Response:
def del_handler(self, handle):
"""
Remove the handle registered for `handle`
:raises KeyError:
The handle wasn't registered.
"""
_, _, _, respondent = self._handle_map.pop(handle)
if respondent:
self._handles_by_respondent[respondent].discard(handle) |
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None | Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None | Below is the the instruction that describes the task:
### Input:
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
### Response:
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None |
def enumerate_spans(sentence: List[T],
offset: int = 0,
max_span_width: int = None,
min_span_width: int = 1,
filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]:
"""
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping ``List[T] -> bool``, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy ``Token``
attributes, for example.
Parameters
----------
sentence : ``List[T]``, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy ``Tokens`` or other sequences.
offset : ``int``, optional (default = 0)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : ``int``, optional (default = None)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : ``int``, optional (default = 1)
The minimum length of spans which should be included. Defaults to 1.
filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
A function mapping sequences of the passed type T to a boolean value.
If ``True``, the span is included in the returned spans from the
sentence, otherwise it is excluded.
"""
max_span_width = max_span_width or len(sentence)
filter_function = filter_function or (lambda x: True)
spans: List[Tuple[int, int]] = []
for start_index in range(len(sentence)):
last_end_index = min(start_index + max_span_width, len(sentence))
first_end_index = min(start_index + min_span_width - 1, len(sentence))
for end_index in range(first_end_index, last_end_index):
start = offset + start_index
end = offset + end_index
# add 1 to end index because span indices are inclusive.
if filter_function(sentence[slice(start_index, end_index + 1)]):
spans.append((start, end))
return spans | Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping ``List[T] -> bool``, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy ``Token``
attributes, for example.
Parameters
----------
sentence : ``List[T]``, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy ``Tokens`` or other sequences.
offset : ``int``, optional (default = 0)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : ``int``, optional (default = None)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : ``int``, optional (default = 1)
The minimum length of spans which should be included. Defaults to 1.
filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
A function mapping sequences of the passed type T to a boolean value.
If ``True``, the span is included in the returned spans from the
sentence, otherwise it is excluded. | Below is the the instruction that describes the task:
### Input:
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping ``List[T] -> bool``, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy ``Token``
attributes, for example.
Parameters
----------
sentence : ``List[T]``, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy ``Tokens`` or other sequences.
offset : ``int``, optional (default = 0)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : ``int``, optional (default = None)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : ``int``, optional (default = 1)
The minimum length of spans which should be included. Defaults to 1.
filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
A function mapping sequences of the passed type T to a boolean value.
If ``True``, the span is included in the returned spans from the
sentence, otherwise it is excluded.
### Response:
def enumerate_spans(sentence: List[T],
offset: int = 0,
max_span_width: int = None,
min_span_width: int = 1,
filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]:
"""
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping ``List[T] -> bool``, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy ``Token``
attributes, for example.
Parameters
----------
sentence : ``List[T]``, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy ``Tokens`` or other sequences.
offset : ``int``, optional (default = 0)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : ``int``, optional (default = None)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : ``int``, optional (default = 1)
The minimum length of spans which should be included. Defaults to 1.
filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
A function mapping sequences of the passed type T to a boolean value.
If ``True``, the span is included in the returned spans from the
sentence, otherwise it is excluded.
"""
max_span_width = max_span_width or len(sentence)
filter_function = filter_function or (lambda x: True)
spans: List[Tuple[int, int]] = []
for start_index in range(len(sentence)):
last_end_index = min(start_index + max_span_width, len(sentence))
first_end_index = min(start_index + min_span_width - 1, len(sentence))
for end_index in range(first_end_index, last_end_index):
start = offset + start_index
end = offset + end_index
# add 1 to end index because span indices are inclusive.
if filter_function(sentence[slice(start_index, end_index + 1)]):
spans.append((start, end))
return spans |
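A quick illustration of enumerate_spans with a plain list of strings (any sequence type works), assuming the function above is importable.

sentence = ['The', 'quick', 'brown', 'fox']
print(enumerate_spans(sentence, max_span_width=2))
# [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, 3)]
print(enumerate_spans(sentence, offset=10, max_span_width=1))
# [(10, 10), (11, 11), (12, 12), (13, 13)]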
def calc_synch_snu_ujy(b, ne, delta, sinth, width, elongation, dist, ghz, E0=1.):
"""Calculate a flux density from pure gyrosynchrotron emission.
This combines Dulk (1985) equations 40 and 41, which are fitting functions
assuming a power-law electron population, with standard radiative transfer
through a uniform medium. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
It's not specified for what range of values the expressions work well.
width
The characteristic cross-sectional width of the emitting region, in cm.
elongation
The elongation of the emitting region; ``depth = width * elongation``.
dist
The distance to the emitting region, in cm.
ghz
The frequencies at which to evaluate the spectrum, **in GHz**.
E0
The minimum energy of electrons to consider, in MeV. Defaults to 1 so that
these functions can be called identically to the gyrosynchrotron functions.
The return value is the flux density **in μJy**. The arguments can be
Numpy arrays.
No complaints are raised if you attempt to use the equations outside of
their range of validity.
"""
hz = ghz * 1e9
eta = calc_synch_eta(b, ne, delta, sinth, hz, E0=E0)
kappa = calc_synch_kappa(b, ne, delta, sinth, hz, E0=E0)
snu = calc_snu(eta, kappa, width, elongation, dist)
ujy = snu * cgs.jypercgs * 1e6
return ujy | Calculate a flux density from pure gyrosynchrotron emission.
This combines Dulk (1985) equations 40 and 41, which are fitting functions
assuming a power-law electron population, with standard radiative transfer
through a uniform medium. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
It's not specified for what range of values the expressions work well.
width
The characteristic cross-sectional width of the emitting region, in cm.
elongation
The elongation of the emitting region; ``depth = width * elongation``.
dist
The distance to the emitting region, in cm.
ghz
The frequencies at which to evaluate the spectrum, **in GHz**.
E0
The minimum energy of electrons to consider, in MeV. Defaults to 1 so that
these functions can be called identically to the gyrosynchrotron functions.
The return value is the flux density **in μJy**. The arguments can be
Numpy arrays.
No complaints are raised if you attempt to use the equations outside of
their range of validity. | Below is the the instruction that describes the task:
### Input:
Calculate a flux density from pure gyrosynchrotron emission.
This combines Dulk (1985) equations 40 and 41, which are fitting functions
assuming a power-law electron population, with standard radiative transfer
through a uniform medium. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
It's not specified for what range of values the expressions work well.
width
The characteristic cross-sectional width of the emitting region, in cm.
elongation
The elongation of the emitting region; ``depth = width * elongation``.
dist
The distance to the emitting region, in cm.
ghz
The frequencies at which to evaluate the spectrum, **in GHz**.
E0
The minimum energy of electrons to consider, in MeV. Defaults to 1 so that
these functions can be called identically to the gyrosynchrotron functions.
The return value is the flux density **in μJy**. The arguments can be
Numpy arrays.
No complaints are raised if you attempt to use the equations outside of
their range of validity.
### Response:
def calc_synch_snu_ujy(b, ne, delta, sinth, width, elongation, dist, ghz, E0=1.):
"""Calculate a flux density from pure gyrosynchrotron emission.
This combines Dulk (1985) equations 40 and 41, which are fitting functions
assuming a power-law electron population, with standard radiative transfer
through a uniform medium. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
It's not specified for what range of values the expressions work well.
width
The characteristic cross-sectional width of the emitting region, in cm.
elongation
The elongation of the emitting region; ``depth = width * elongation``.
dist
The distance to the emitting region, in cm.
ghz
The frequencies at which to evaluate the spectrum, **in GHz**.
E0
The minimum energy of electrons to consider, in MeV. Defaults to 1 so that
these functions can be called identically to the gyrosynchrotron functions.
The return value is the flux density **in μJy**. The arguments can be
Numpy arrays.
No complaints are raised if you attempt to use the equations outside of
their range of validity.
"""
hz = ghz * 1e9
eta = calc_synch_eta(b, ne, delta, sinth, hz, E0=E0)
kappa = calc_synch_kappa(b, ne, delta, sinth, hz, E0=E0)
snu = calc_snu(eta, kappa, width, elongation, dist)
ujy = snu * cgs.jypercgs * 1e6
return ujy |
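A hypothetical parameter set for calc_synch_snu_ujy; the numbers below are illustrative (a few hundred gauss, a ~10 pc distance expressed in cm) rather than fitted to any source, and the call assumes the surrounding module's calc_synch_eta, calc_synch_kappa, calc_snu and cgs helpers are importable alongside it.

import numpy as np

ghz = np.logspace(0, 2, 5)  # 1-100 GHz
ujy = calc_synch_snu_ujy(b=300., ne=1e7, delta=3., sinth=np.sin(np.pi / 4),
                         width=1e10, elongation=2., dist=3.1e19, ghz=ghz)
print(ujy.shape)  # one flux density per frequency: (5,)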
def GetArtifactsForCollection(os_name, artifact_list):
"""Wrapper for the ArtifactArranger.
Extend the artifact list by dependencies and sort the artifacts to resolve the
dependencies.
Args:
os_name: String specifying the OS name.
artifact_list: List of requested artifact names.
Returns:
A list of artifacts such that if they are collected in the given order
their dependencies are resolved.
"""
artifact_arranger = ArtifactArranger(os_name, artifact_list)
artifact_names = artifact_arranger.GetArtifactsInProperOrder()
return artifact_names | Wrapper for the ArtifactArranger.
Extend the artifact list by dependencies and sort the artifacts to resolve the
dependencies.
Args:
os_name: String specifying the OS name.
artifact_list: List of requested artifact names.
Returns:
A list of artifacts such that if they are collected in the given order
their dependencies are resolved. | Below is the the instruction that describes the task:
### Input:
Wrapper for the ArtifactArranger.
Extend the artifact list by dependencies and sort the artifacts to resolve the
dependencies.
Args:
os_name: String specifying the OS name.
artifact_list: List of requested artifact names.
Returns:
A list of artifacts such that if they are collected in the given order
their dependencies are resolved.
### Response:
def GetArtifactsForCollection(os_name, artifact_list):
"""Wrapper for the ArtifactArranger.
Extend the artifact list by dependencies and sort the artifacts to resolve the
dependencies.
Args:
os_name: String specifying the OS name.
artifact_list: List of requested artifact names.
Returns:
A list of artifacts such that if they are collected in the given order
their dependencies are resolved.
"""
artifact_arranger = ArtifactArranger(os_name, artifact_list)
artifact_names = artifact_arranger.GetArtifactsInProperOrder()
return artifact_names |
def _int_from_str(string):
"""
Convert string into integer
Raise:
TypeError if string is not a valid integer
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
else:
# Needed to handle pseudos with fractional charge
int_num = np.rint(float_num)
logger.warning("Converting float %s to int %s" % (float_num, int_num))
return int_num | Convert string into integer
Raise:
TypeError if string is not a valid integer | Below is the the instruction that describes the task:
### Input:
Convert string into integer
Raise:
TypeError if string is not a valid integer
### Response:
def _int_from_str(string):
"""
Convert string into integer
Raise:
TypeError if string is not a valid integer
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
else:
# Needed to handle pseudos with fractional charge
int_num = np.rint(float_num)
logger.warning("Converting float %s to int %s" % (float_num, int_num))
return int_num |
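Behaviour sketch for _int_from_str, assuming the module's numpy (np) import and logger are available: exact floats come back as ints, fractional values are rounded with a warning, and a non-numeric string raises ValueError from float() rather than the TypeError the docstring mentions.

print(_int_from_str('3'))    # 3
print(_int_from_str('3.0'))  # 3
print(_int_from_str('2.6'))  # 3.0 (np.rint result), after a logged warning
try:
    _int_from_str('abc')
except ValueError:
    print('not a number')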
def refill(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
"""
Refill wallets with the necessary fuel to perform spool transactions
Args:
from_address (Tuple[str]): Federation wallet address. Fuels the wallets with tokens and fees. All transactions to wallets
holding a particular piece should come from the Federation wallet
to_address (str): Wallet address that needs to perform a spool transaction
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Federation wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least on confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
"""
path, from_address = from_address
verb = Spoolverb()
# nfees + 1: nfees to refill plus one fee for the refill transaction itself
inputs = self.select_inputs(from_address, nfees + 1, ntokens, min_confirmations=min_confirmations)
outputs = [{'address': to_address, 'value': self.token}] * ntokens
outputs += [{'address': to_address, 'value': self.fee}] * nfees
outputs += [{'script': self._t._op_return_hex(verb.fuel), 'value': 0}]
unsigned_tx = self._t.build_transaction(inputs, outputs)
signed_tx = self._t.sign_transaction(unsigned_tx, password, path=path)
txid = self._t.push(signed_tx)
return txid | Refill wallets with the necessary fuel to perform spool transactions
Args:
from_address (Tuple[str]): Federation wallet address. Fuels the wallets with tokens and fees. All transactions to wallets
holding a particular piece should come from the Federation wallet
to_address (str): Wallet address that needs to perform a spool transaction
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Federation wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when choosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least one confirmation on the blockchain. Defaults to False
Returns:
str: transaction id | Below is the the instruction that describes the task:
### Input:
Refill wallets with the necessary fuel to perform spool transactions
Args:
from_address (Tuple[str]): Federation wallet address. Fuels the wallets with tokens and fees. All transactions to wallets
holding a particular piece should come from the Federation wallet
to_address (str): Wallet address that needs to perform a spool transaction
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Federation wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when choosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least one confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
### Response:
def refill(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
"""
Refill wallets with the necessary fuel to perform spool transactions
Args:
from_address (Tuple[str]): Federation wallet address. Fuels the wallets with tokens and fees. All transactions to wallets
holding a particular piece should come from the Federation wallet
to_address (str): Wallet address that needs to perform a spool transaction
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Federation wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when choosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least one confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
"""
path, from_address = from_address
verb = Spoolverb()
# nfees + 1: nfees to refill plus one fee for the refill transaction itself
inputs = self.select_inputs(from_address, nfees + 1, ntokens, min_confirmations=min_confirmations)
outputs = [{'address': to_address, 'value': self.token}] * ntokens
outputs += [{'address': to_address, 'value': self.fee}] * nfees
outputs += [{'script': self._t._op_return_hex(verb.fuel), 'value': 0}]
unsigned_tx = self._t.build_transaction(inputs, outputs)
signed_tx = self._t.sign_transaction(unsigned_tx, password, path=path)
txid = self._t.push(signed_tx)
return txid |
def iter_callback_properties(self):
"""
Iterator to loop over all callback properties.
"""
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name) | Iterator to loop over all callback properties. | Below is the the instruction that describes the task:
### Input:
Iterator to loop over all callback properties.
### Response:
def iter_callback_properties(self):
"""
Iterator to loop over all callback properties.
"""
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name) |
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'):
"""
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default.
:param storepass(str): keystore file storepass
:param keypass(str): keystore file keypass
:param keystore(str): keystore file path
:param apk(str): apk file path to be signed
:param alias(str): keystore file alias
:param name(str): signed apk name to be used by zipalign
"""
self.src_folder = self.get_src_folder()
if keystore is None:
(keystore, storepass, keypass, alias) = android_helper.get_default_keystore()
dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name)
android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path)
android_helper.zipalign(apk, dist, build_tool=self.get_build_tool_version(), path=self.path) | Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default.
:param storepass(str): keystore file storepass
:param keypass(str): keystore file keypass
:param keystore(str): keystore file path
:param apk(str): apk file path to be signed
:param alias(str): keystore file alias
:param name(str): signed apk name to be used by zipalign | Below is the the instruction that describes the task:
### Input:
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default.
:param storepass(str): keystore file storepass
:param keypass(str): keystore file keypass
:param keystore(str): keystore file path
:param apk(str): apk file path to be signed
:param alias(str): keystore file alias
:param name(str): signed apk name to be used by zipalign
### Response:
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'):
"""
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default.
:param storepass(str): keystore file storepass
:param keypass(str): keystore file keypass
:param keystore(str): keystore file path
:param apk(str): apk file path to be signed
:param alias(str): keystore file alias
:param name(str): signed apk name to be used by zipalign
"""
self.src_folder = self.get_src_folder()
if keystore is None:
(keystore, storepass, keypass, alias) = android_helper.get_default_keystore()
dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name)
android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path)
android_helper.zipalign(apk, dist, build_tool=self.get_build_tool_version(), path=self.path) |
def _get_opstr(op, cls):
"""
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
"""
# numexpr is available for non-sparse classes
subtyp = getattr(cls, '_subtyp', '')
use_numexpr = 'sparse' not in subtyp
if not use_numexpr:
# if we're not using numexpr, then don't pass a str_rep
return None
return {operator.add: '+',
radd: '+',
operator.mul: '*',
rmul: '*',
operator.sub: '-',
rsub: '-',
operator.truediv: '/',
rtruediv: '/',
operator.floordiv: '//',
rfloordiv: '//',
operator.mod: None, # TODO: Why None for mod but '%' for rmod?
rmod: '%',
operator.pow: '**',
rpow: '**',
operator.eq: '==',
operator.ne: '!=',
operator.le: '<=',
operator.lt: '<',
operator.ge: '>=',
operator.gt: '>',
operator.and_: '&',
rand_: '&',
operator.or_: '|',
ror_: '|',
operator.xor: '^',
rxor: '^',
divmod: None,
rdivmod: None}[op] | Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None | Below is the the instruction that describes the task:
### Input:
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
### Response:
def _get_opstr(op, cls):
"""
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
"""
# numexpr is available for non-sparse classes
subtyp = getattr(cls, '_subtyp', '')
use_numexpr = 'sparse' not in subtyp
if not use_numexpr:
# if we're not using numexpr, then don't pass a str_rep
return None
return {operator.add: '+',
radd: '+',
operator.mul: '*',
rmul: '*',
operator.sub: '-',
rsub: '-',
operator.truediv: '/',
rtruediv: '/',
operator.floordiv: '//',
rfloordiv: '//',
operator.mod: None, # TODO: Why None for mod but '%' for rmod?
rmod: '%',
operator.pow: '**',
rpow: '**',
operator.eq: '==',
operator.ne: '!=',
operator.le: '<=',
operator.lt: '<',
operator.ge: '>=',
operator.gt: '>',
operator.and_: '&',
rand_: '&',
operator.or_: '|',
ror_: '|',
operator.xor: '^',
rxor: '^',
divmod: None,
rdivmod: None}[op] |
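Illustrative lookups with _get_opstr, assuming pandas is installed and the reversed-operator aliases it references (radd, rsub, rmod, ...) are defined in the surrounding module, as they are in pandas.core.ops.

import operator
import pandas as pd

print(_get_opstr(operator.add, pd.DataFrame))  # '+'  (non-sparse class, numexpr-friendly)
print(_get_opstr(operator.mod, pd.DataFrame))  # None (mod is never handed to numexpr)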
def to_ipa(s):
"""Convert *s* to IPA."""
identity = identify(s)
if identity == IPA:
return s
elif identity == PINYIN:
return pinyin_to_ipa(s)
elif identity == ZHUYIN:
return zhuyin_to_ipa(s)
else:
raise ValueError("String is not a valid Chinese transcription.") | Convert *s* to IPA. | Below is the the instruction that describes the task:
### Input:
Convert *s* to IPA.
### Response:
def to_ipa(s):
"""Convert *s* to IPA."""
identity = identify(s)
if identity == IPA:
return s
elif identity == PINYIN:
return pinyin_to_ipa(s)
elif identity == ZHUYIN:
return zhuyin_to_ipa(s)
else:
raise ValueError("String is not a valid Chinese transcription.") |
def sanitizeString(name):
"""Cleans string in preparation for splitting for use as a pairtree
identifier."""
newString = name
# string cleaning, pass 1
replaceTable = [
('^', '^5e'), # we need to do this one first
('"', '^22'),
('<', '^3c'),
('?', '^3f'),
('*', '^2a'),
('=', '^3d'),
('+', '^2b'),
('>', '^3e'),
('|', '^7c'),
(',', '^2c'),
]
# " hex 22 < hex 3c ? hex 3f
# * hex 2a = hex 3d ^ hex 5e
# + hex 2b > hex 3e | hex 7c
# , hex 2c
for r in replaceTable:
newString = newString.replace(r[0], r[1])
# replace ascii 0-32
for x in range(0, 33):
# must add somewhat arbitrary num to avoid conflict at deSanitization
# conflict example: is ^x1e supposed to be ^x1 (ascii 1) followed by
# letter 'e' or really ^x1e (ascii 30)
newString = newString.replace(
chr(x), hex(x + sanitizerNum).replace('0x', '^'))
replaceTable2 = [
("/", "="),
(":", "+"),
(".", ","),
]
# / -> =
# : -> +
# . -> ,
# string cleaning pass 2
for r in replaceTable2:
newString = newString.replace(r[0], r[1])
return newString | Cleans string in preparation for splitting for use as a pairtree
identifier. | Below is the the instruction that describes the task:
### Input:
Cleans string in preparation for splitting for use as a pairtree
identifier.
### Response:
def sanitizeString(name):
"""Cleans string in preparation for splitting for use as a pairtree
identifier."""
newString = name
# string cleaning, pass 1
replaceTable = [
('^', '^5e'), # we need to do this one first
('"', '^22'),
('<', '^3c'),
('?', '^3f'),
('*', '^2a'),
('=', '^3d'),
('+', '^2b'),
('>', '^3e'),
('|', '^7c'),
(',', '^2c'),
]
# " hex 22 < hex 3c ? hex 3f
# * hex 2a = hex 3d ^ hex 5e
# + hex 2b > hex 3e | hex 7c
# , hex 2c
for r in replaceTable:
newString = newString.replace(r[0], r[1])
# replace ascii 0-32
for x in range(0, 33):
# must add somewhat arbitrary num to avoid conflict at deSanitization
# conflict example: is ^x1e supposed to be ^x1 (ascii 1) followed by
# letter 'e' or really ^x1e (ascii 30)
newString = newString.replace(
chr(x), hex(x + sanitizerNum).replace('0x', '^'))
replaceTable2 = [
("/", "="),
(":", "+"),
(".", ","),
]
# / -> =
# : -> +
# . -> ,
# string cleaning pass 2
for r in replaceTable2:
newString = newString.replace(r[0], r[1])
return newString |
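A quick illustration of the two replacement passes, assuming sanitizeString and the module-level sanitizerNum constant it needs are in scope (these inputs avoid ASCII 0-32, so sanitizerNum's exact value does not matter):

# Pass 1 escapes reserved characters ('*' -> '^2a', '?' -> '^3f', '"' -> '^22', ...);
# pass 2 then remaps '/' -> '=', ':' -> '+' and '.' -> ','.
print(sanitizeString('ab:cd/ef.gh'))    # ab+cd=ef,gh
print(sanitizeString('v1.2*final?'))    # v1,2^2afinal^3f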
def is_instance_running(self, instance_id):
"""
Check if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
self._init_az_api()
# Here, it's always better if we update the instance.
vm = self._get_vm(instance_id, force_reload=True)
# FIXME: should we rather check `vm.instance_view.statuses`
# and search for `.code == "PowerState/running"`? or
# `vm.instance_view.vm_agent.statuses` and search for `.code
# == 'ProvisioningState/suceeded'`?
return vm.provisioning_state == u'Succeeded' | Check if the instance is up and running.
:param str instance_id: instance identifier
        :return: bool - True if running, False otherwise | Below is the instruction that describes the task:
### Input:
Check if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
### Response:
def is_instance_running(self, instance_id):
"""
Check if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
self._init_az_api()
# Here, it's always better if we update the instance.
vm = self._get_vm(instance_id, force_reload=True)
# FIXME: should we rather check `vm.instance_view.statuses`
# and search for `.code == "PowerState/running"`? or
# `vm.instance_view.vm_agent.statuses` and search for `.code
# == 'ProvisioningState/suceeded'`?
return vm.provisioning_state == u'Succeeded' |
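A typical use of this check is a polling loop that waits for the VM to come up; a hedged sketch (the provider object, timeout and poll interval are placeholders, not guarantees about the surrounding library's API):

import time

def wait_until_running(provider, instance_id, timeout=600, poll_interval=15):
    # Poll is_instance_running() until it reports True or the deadline passes.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if provider.is_instance_running(instance_id):
            return True
        time.sleep(poll_interval)
    return False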
def from_series(self, series, add_index_column=True):
"""
Set tabular attributes to the writer from :py:class:`pandas.Series`.
Following attributes are set by the method:
- :py:attr:`~.headers`
- :py:attr:`~.value_matrix`
- :py:attr:`~.type_hints`
Args:
series(pandas.Series):
Input pandas.Series object.
add_index_column(bool, optional):
If |True|, add a column of ``index`` of the ``series``.
Defaults to |True|.
"""
if series.name:
self.headers = [series.name]
else:
self.headers = ["value"]
self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]
if add_index_column:
self.headers = [""] + self.headers
if self.type_hints:
self.type_hints = [None] + self.type_hints
self.value_matrix = [
[index] + [value] for index, value in zip(series.index.tolist(), series.tolist())
]
else:
self.value_matrix = [[value] for value in series.tolist()] | Set tabular attributes to the writer from :py:class:`pandas.Series`.
Following attributes are set by the method:
- :py:attr:`~.headers`
- :py:attr:`~.value_matrix`
- :py:attr:`~.type_hints`
Args:
series(pandas.Series):
Input pandas.Series object.
add_index_column(bool, optional):
If |True|, add a column of ``index`` of the ``series``.
                Defaults to |True|. | Below is the instruction that describes the task:
### Input:
Set tabular attributes to the writer from :py:class:`pandas.Series`.
Following attributes are set by the method:
- :py:attr:`~.headers`
- :py:attr:`~.value_matrix`
- :py:attr:`~.type_hints`
Args:
series(pandas.Series):
Input pandas.Series object.
add_index_column(bool, optional):
If |True|, add a column of ``index`` of the ``series``.
Defaults to |True|.
### Response:
def from_series(self, series, add_index_column=True):
"""
Set tabular attributes to the writer from :py:class:`pandas.Series`.
Following attributes are set by the method:
- :py:attr:`~.headers`
- :py:attr:`~.value_matrix`
- :py:attr:`~.type_hints`
Args:
series(pandas.Series):
Input pandas.Series object.
add_index_column(bool, optional):
If |True|, add a column of ``index`` of the ``series``.
Defaults to |True|.
"""
if series.name:
self.headers = [series.name]
else:
self.headers = ["value"]
self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]
if add_index_column:
self.headers = [""] + self.headers
if self.type_hints:
self.type_hints = [None] + self.type_hints
self.value_matrix = [
[index] + [value] for index, value in zip(series.index.tolist(), series.tolist())
]
else:
self.value_matrix = [[value] for value in series.tolist()] |
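To see what the method fills in, here is the same index-plus-value flattening applied directly to a small Series (a worked illustration of the default add_index_column=True path, not the writer object itself):

import pandas as pd

series = pd.Series([0.1, 0.25, 0.4], index=["a", "b", "c"], name="score")
headers = [""] + [series.name]
value_matrix = [[i] + [v] for i, v in zip(series.index.tolist(), series.tolist())]
print(headers)        # ['', 'score']
print(value_matrix)   # [['a', 0.1], ['b', 0.25], ['c', 0.4]]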
def _move_file_with_sizecheck(tx_file, final_file):
"""Move transaction file to final location,
with size checks avoiding failed transfers.
    Creates an empty file with '.bcbiotmp' extension in the destination
location, which serves as a flag. If a file like that is present,
it means that transaction didn't finish successfully.
"""
#logger.debug("Moving %s to %s" % (tx_file, final_file))
tmp_file = final_file + ".bcbiotmp"
open(tmp_file, 'wb').close()
want_size = utils.get_size(tx_file)
shutil.move(tx_file, final_file)
transfer_size = utils.get_size(final_file)
assert want_size == transfer_size, (
'distributed.transaction.file_transaction: File copy error: '
'file or directory on temporary storage ({}) size {} bytes '
'does not equal size of file or directory after transfer to '
'shared storage ({}) size {} bytes'.format(
tx_file, want_size, final_file, transfer_size)
)
utils.remove_safe(tmp_file) | Move transaction file to final location,
with size checks avoiding failed transfers.
    Creates an empty file with '.bcbiotmp' extension in the destination
location, which serves as a flag. If a file like that is present,
    it means that transaction didn't finish successfully. | Below is the instruction that describes the task:
### Input:
Move transaction file to final location,
with size checks avoiding failed transfers.
    Creates an empty file with '.bcbiotmp' extension in the destination
location, which serves as a flag. If a file like that is present,
it means that transaction didn't finish successfully.
### Response:
def _move_file_with_sizecheck(tx_file, final_file):
"""Move transaction file to final location,
with size checks avoiding failed transfers.
    Creates an empty file with '.bcbiotmp' extension in the destination
location, which serves as a flag. If a file like that is present,
it means that transaction didn't finish successfully.
"""
#logger.debug("Moving %s to %s" % (tx_file, final_file))
tmp_file = final_file + ".bcbiotmp"
open(tmp_file, 'wb').close()
want_size = utils.get_size(tx_file)
shutil.move(tx_file, final_file)
transfer_size = utils.get_size(final_file)
assert want_size == transfer_size, (
'distributed.transaction.file_transaction: File copy error: '
'file or directory on temporary storage ({}) size {} bytes '
'does not equal size of file or directory after transfer to '
'shared storage ({}) size {} bytes'.format(
tx_file, want_size, final_file, transfer_size)
)
utils.remove_safe(tmp_file) |
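The same marker-file-plus-size-check idea in a dependency-free sketch for plain files, using os and shutil directly instead of the utils helpers (whose directory handling is assumed, not shown here):

import os
import shutil

def move_with_sizecheck(src, dst):
    # Leave a marker next to the destination; it is only removed once the move
    # completes and the sizes match, so a leftover marker flags a bad transfer.
    marker = dst + ".tmpflag"
    open(marker, "wb").close()
    want = os.path.getsize(src)
    shutil.move(src, dst)
    got = os.path.getsize(dst)
    if want != got:
        raise IOError("size mismatch: %s (%d bytes) -> %s (%d bytes)"
                      % (src, want, dst, got))
    os.remove(marker)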
def text_to_title(value):
"""when a title is required, generate one from the value"""
title = None
if not value:
return title
words = value.split(" ")
keep_words = []
for word in words:
if word.endswith(".") or word.endswith(":"):
keep_words.append(word)
if len(word) > 1 and "<italic>" not in word and "<i>" not in word:
break
else:
keep_words.append(word)
if len(keep_words) > 0:
title = " ".join(keep_words)
if title.split(" ")[-1] != "spp.":
title = title.rstrip(" .:")
    return title | when a title is required, generate one from the value | Below is the instruction that describes the task:
### Input:
when a title is required, generate one from the value
### Response:
def text_to_title(value):
"""when a title is required, generate one from the value"""
title = None
if not value:
return title
words = value.split(" ")
keep_words = []
for word in words:
if word.endswith(".") or word.endswith(":"):
keep_words.append(word)
if len(word) > 1 and "<italic>" not in word and "<i>" not in word:
break
else:
keep_words.append(word)
if len(keep_words) > 0:
title = " ".join(keep_words)
if title.split(" ")[-1] != "spp.":
title = title.rstrip(" .:")
return title |
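A few worked calls tracing the loop above (outputs follow from the code as written):

print(text_to_title("Correction: errors in Figure 2 have been fixed."))
# -> "Correction"   (first word ends with ':', is kept, trailing ':' stripped)
print(text_to_title("<i>Bacillus</i> subtilis was grown overnight."))
# -> "<i>Bacillus</i>"   (italic-tagged words are kept until a plain word appears)
print(text_to_title(""))
# -> None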