code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def login(self):
"""Set http session."""
if self._session is None:
self._session = requests.session()
# adding fake user-agent header
self._session.headers.update({'User-agent': str(UserAgent().random)})
return self._post_login_page() | Set http session. | Below is the instruction that describes the task:
### Input:
Set http session.
### Response:
def login(self):
"""Set http session."""
if self._session is None:
self._session = requests.session()
# adding fake user-agent header
self._session.headers.update({'User-agent': str(UserAgent().random)})
return self._post_login_page() |
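The `login` helper above relies on an external user-agent randomizer (presumably the `fake_useragent` package). A minimal, hedged sketch of the same session setup using only `requests`, with a hard-coded example User-Agent string standing in for the random one:

```python
# Sketch only: the User-Agent value below is illustrative, not taken from the original code.
import requests

session = requests.session()
session.headers.update({'User-agent': 'Mozilla/5.0 (X11; Linux x86_64) ExampleBot/1.0'})
print(session.headers['User-agent'])
```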
def get_version():
""" str: The package version. """
global_vars = {}
# Compile and execute the individual file to prevent
# the package from being automatically loaded.
source = read(os.path.join("capybara", "version.py"))
code = compile(source, "version.py", "exec")
exec(code, global_vars)
return global_vars['__version__'] | str: The package version. | Below is the instruction that describes the task:
### Input:
str: The package version.
### Response:
def get_version():
""" str: The package version. """
global_vars = {}
# Compile and execute the individual file to prevent
# the package from being automatically loaded.
source = read(os.path.join("capybara", "version.py"))
code = compile(source, "version.py", "exec")
exec(code, global_vars)
return global_vars['__version__'] |
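The pattern above compiles and executes only the version module so the package itself is never imported. A hedged, standalone sketch of that idea; the path and file contents are assumed for illustration:

```python
# Assumes a file like mypkg/version.py containing: __version__ = "1.2.3"
import os

def get_version(path=os.path.join("mypkg", "version.py")):
    global_vars = {}
    with open(path) as fh:
        source = fh.read()
    # Execute only this one file; the surrounding package is never imported.
    exec(compile(source, "version.py", "exec"), global_vars)
    return global_vars["__version__"]
```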
def unique_field(self, field_name):
"""set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...)"""
self.fields_set.options["unique"] = True
return self.select_field(field_name) | set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...) | Below is the instruction that describes the task:
### Input:
set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...)
### Response:
def unique_field(self, field_name):
"""set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...)"""
self.fields_set.options["unique"] = True
return self.select_field(field_name) |
def _prepare_for_submission(self,tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this "
"calculation")
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this "
"calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
structure = inputdict.pop(self.get_linkname('structure'))
except KeyError:
raise InputValidationError("No structure specified for this "
"calculation")
if not isinstance(structure,StructureData):
raise InputValidationError("structure node is not of type"
"StructureData")
try:
settings = inputdict.pop(self.get_linkname('settings'),None)
except KeyError:
pass
if settings is not None:
if not isinstance(settings, ParameterData):
raise InputValidationError("settings is not of type "
"ParameterData")
try:
kpoints = inputdict.pop(self.get_linkname('kpoints'),None)
except KeyError:
pass
if kpoints is not None:
if not isinstance(kpoints, KpointsData):
raise InputValidationError("kpoints is not of type KpointsData")
##############################
# END OF INITIAL INPUT CHECK #
##############################
# default atom getter: I will always retrieve the total energy at least
default_atoms_getters = [ ["total_energy",""] ]
# ================================
# save the structure in ase format
atoms = structure.get_ase()
atoms.write(tempfolder.get_abs_path(self._input_aseatoms))
# ================== prepare the arguments of functions ================
parameters_dict = parameters.get_dict()
settings_dict = settings.get_dict() if settings is not None else {}
# ==================== fix the args of the optimizer
optimizer = parameters_dict.pop("optimizer",None)
if optimizer is not None:
# Validation
if not isinstance(optimizer,dict):
raise InputValidationError("optimizer key must contain a dictionary")
# get the name of the optimizer
optimizer_name = optimizer.pop("name",None)
if optimizer_name is None:
raise InputValidationError("Don't have access to the optimizer name")
# prepare the arguments to be passed to the optimizer class
optimizer_argsstr = "atoms, " + convert_the_args(optimizer.pop("args",[]))
# prepare the arguments to be passed to optimizer.run()
optimizer_runargsstr = convert_the_args(optimizer.pop("run_args",[]))
# prepare the import string
optimizer_import_string = get_optimizer_impstr(optimizer_name)
# ================= determine the calculator name and its import ====
calculator = parameters_dict.pop("calculator",{})
calculator_import_string = get_calculator_impstr(calculator.pop("name",None))
# =================== prepare the arguments for the calculator call
read_calc_args = calculator.pop("args",[])
#calc_args = calculator.pop("args",None)
if read_calc_args is None:
calc_argsstr = ""
else:
# transform a in "a" if a is a string (needed for formatting)
calc_args = {}
for k,v in read_calc_args.iteritems():
if isinstance(v, basestring):
the_v = '"{}"'.format(v)
else:
the_v = v
calc_args[k] = the_v
def return_a_function(v):
try:
has_magic = "@function" in v.keys()
except AttributeError:
has_magic = False
if has_magic:
args_dict = {}
for k2,v2 in v['args'].iteritems():
if isinstance(v2,basestring):
the_v = '"{}"'.format(v2)
else:
the_v = v2
args_dict[k2] = the_v
v2 = "{}({})".format(v['@function'],
", ".join(["{}={}".format(k_,v_)
for k_,v_ in args_dict.iteritems()]))
return v2
else:
return v
tmp_list = [ "{}={}".format(k,return_a_function(v))
for k,v in calc_args.iteritems() ]
calc_argsstr = ", ".join( tmp_list )
# add kpoints if present
if kpoints:
#TODO: here only the mesh is supported
# maybe kpoint lists are supported as well in ASE calculators
try:
mesh = kpoints.get_kpoints_mesh()[0]
except AttributeError:
raise InputValidationError("Couldn't find a mesh of kpoints"
" in the KpointsData")
calc_argsstr = ", ".join( [calc_argsstr] + ["kpts=({},{},{})".format( *mesh )] )
# =============== prepare the methods of atoms.get(), to save results
atoms_getters = default_atoms_getters + convert_the_getters( parameters_dict.pop("atoms_getters",[]) )
# =============== prepare the methods of calculator.get(), to save results
calculator_getters = convert_the_getters( parameters_dict.pop("calculator_getters",[]) )
# ===================== build the strings with the module imports
all_imports = ["import ase", 'import ase.io', "import json",
"import numpy", calculator_import_string]
if optimizer is not None:
all_imports.append(optimizer_import_string)
try:
if "PW" in calc_args['mode'].values():
all_imports.append("from gpaw import PW")
except KeyError:
pass
extra_imports = parameters_dict.pop("extra_imports",[])
for i in extra_imports:
if isinstance(i,basestring):
all_imports.append("import {}".format(i))
elif isinstance(i,(list,tuple)):
if not all( [isinstance(j,basestring) for j in i] ):
raise ValueError("extra import must contain strings")
if len(i)==2:
all_imports.append("from {} import {}".format(*i))
elif len(i)==3:
all_imports.append("from {} import {} as {}".format(*i))
else:
raise ValueError("format for extra imports not recognized")
else:
raise ValueError("format for extra imports not recognized")
if self.get_withmpi():
all_imports.append( "from ase.parallel import paropen" )
all_imports_string = "\n".join(all_imports) + "\n"
# =================== prepare the python script ========================
input_txt = ""
input_txt += get_file_header()
input_txt += "# calculation pk: {}\n".format(self.pk)
input_txt += "\n"
input_txt += all_imports_string
input_txt += "\n"
pre_lines = parameters_dict.pop("pre_lines",None)
if pre_lines is not None:
if not isinstance(pre_lines,(list,tuple)):
raise ValueError("Prelines must be a list of strings")
if not all( [isinstance(_,basestring) for _ in pre_lines] ):
raise ValueError("Prelines must be a list of strings")
input_txt += "\n".join(pre_lines) + "\n\n"
input_txt += "atoms = ase.io.read('{}')\n".format(self._input_aseatoms)
input_txt += "\n"
input_txt += "calculator = custom_calculator({})\n".format(calc_argsstr)
input_txt += "atoms.set_calculator(calculator)\n"
input_txt += "\n"
if optimizer is not None:
# here block the trajectory file name: trajectory = 'aiida.traj'
input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr)
input_txt += "optimizer.run({})\n".format(optimizer_runargsstr)
input_txt += "\n"
# now dump / calculate the results
input_txt += "results = {}\n"
for getter,getter_args in atoms_getters:
input_txt += "results['{}'] = atoms.get_{}({})\n".format(getter,
getter,
getter_args)
input_txt += "\n"
for getter,getter_args in calculator_getters:
input_txt += "results['{}'] = calculator.get_{}({})\n".format(getter,
getter,
getter_args)
input_txt += "\n"
# Convert to lists
input_txt += "for k,v in results.iteritems():\n"
input_txt += " if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n"
input_txt += " results[k] = results[k].tolist()\n"
input_txt += "\n"
post_lines = parameters_dict.pop("post_lines",None)
if post_lines is not None:
if not isinstance(post_lines,(list,tuple)):
raise ValueError("Postlines must be a list of strings")
if not all( [isinstance(_,basestring) for _ in post_lines] ):
raise ValueError("Postlines must be a list of strings")
input_txt += "\n".join(post_lines) + "\n\n"
# Dump results to file
right_open = "paropen" if self.get_withmpi() else "open"
input_txt += "with {}('{}', 'w') as f:\n".format(right_open, self._OUTPUT_FILE_NAME)
input_txt += " json.dump(results,f)"
input_txt += "\n"
# Dump trajectory if present
if optimizer is not None:
input_txt += "atoms.write('{}')\n".format(self._output_aseatoms)
input_txt += "\n"
# write all the input script to a file
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
with open(input_filename,'w') as infile:
infile.write(input_txt)
# ============================ calcinfo ================================
# TODO: look at the qmmm infoL: it might be necessary to put
# some singlefiles in the directory.
# right now it has to be taken care in the pre_lines
local_copy_list = []
remote_copy_list = []
additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
# Empty command line by default
# calcinfo.cmdline_params = settings_dict.pop('CMDLINE', [])
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
codeinfo = CodeInfo()
codeinfo.cmdline_params = [self._INPUT_FILE_NAME]
#calcinfo.stdin_name = self._INPUT_FILE_NAME
codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME
codeinfo.code_uuid = code.uuid
calcinfo.codes_info = [codeinfo]
# Retrieve files
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
calcinfo.retrieve_list.append(self._output_aseatoms)
calcinfo.retrieve_list += additional_retrieve_list
# TODO: I should have two ways of running it: with gpaw-python in parallel
# and executing python if in serial
return calcinfo | This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!) | Below is the instruction that describes the task:
### Input:
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
### Response:
def _prepare_for_submission(self,tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this "
"calculation")
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this "
"calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
structure = inputdict.pop(self.get_linkname('structure'))
except KeyError:
raise InputValidationError("No structure specified for this "
"calculation")
if not isinstance(structure,StructureData):
raise InputValidationError("structure node is not of type"
"StructureData")
try:
settings = inputdict.pop(self.get_linkname('settings'),None)
except KeyError:
pass
if settings is not None:
if not isinstance(settings, ParameterData):
raise InputValidationError("settings is not of type "
"ParameterData")
try:
kpoints = inputdict.pop(self.get_linkname('kpoints'),None)
except KeyError:
pass
if kpoints is not None:
if not isinstance(kpoints, KpointsData):
raise InputValidationError("kpoints is not of type KpointsData")
##############################
# END OF INITIAL INPUT CHECK #
##############################
# default atom getter: I will always retrieve the total energy at least
default_atoms_getters = [ ["total_energy",""] ]
# ================================
# save the structure in ase format
atoms = structure.get_ase()
atoms.write(tempfolder.get_abs_path(self._input_aseatoms))
# ================== prepare the arguments of functions ================
parameters_dict = parameters.get_dict()
settings_dict = settings.get_dict() if settings is not None else {}
# ==================== fix the args of the optimizer
optimizer = parameters_dict.pop("optimizer",None)
if optimizer is not None:
# Validation
if not isinstance(optimizer,dict):
raise InputValidationError("optimizer key must contain a dictionary")
# get the name of the optimizer
optimizer_name = optimizer.pop("name",None)
if optimizer_name is None:
raise InputValidationError("Don't have access to the optimizer name")
# prepare the arguments to be passed to the optimizer class
optimizer_argsstr = "atoms, " + convert_the_args(optimizer.pop("args",[]))
# prepare the arguments to be passed to optimizer.run()
optimizer_runargsstr = convert_the_args(optimizer.pop("run_args",[]))
# prepare the import string
optimizer_import_string = get_optimizer_impstr(optimizer_name)
# ================= determine the calculator name and its import ====
calculator = parameters_dict.pop("calculator",{})
calculator_import_string = get_calculator_impstr(calculator.pop("name",None))
# =================== prepare the arguments for the calculator call
read_calc_args = calculator.pop("args",[])
#calc_args = calculator.pop("args",None)
if read_calc_args is None:
calc_argsstr = ""
else:
# transform a in "a" if a is a string (needed for formatting)
calc_args = {}
for k,v in read_calc_args.iteritems():
if isinstance(v, basestring):
the_v = '"{}"'.format(v)
else:
the_v = v
calc_args[k] = the_v
def return_a_function(v):
try:
has_magic = "@function" in v.keys()
except AttributeError:
has_magic = False
if has_magic:
args_dict = {}
for k2,v2 in v['args'].iteritems():
if isinstance(v2,basestring):
the_v = '"{}"'.format(v2)
else:
the_v = v2
args_dict[k2] = the_v
v2 = "{}({})".format(v['@function'],
", ".join(["{}={}".format(k_,v_)
for k_,v_ in args_dict.iteritems()]))
return v2
else:
return v
tmp_list = [ "{}={}".format(k,return_a_function(v))
for k,v in calc_args.iteritems() ]
calc_argsstr = ", ".join( tmp_list )
# add kpoints if present
if kpoints:
#TODO: here only the mesh is supported
# maybe kpoint lists are supported as well in ASE calculators
try:
mesh = kpoints.get_kpoints_mesh()[0]
except AttributeError:
raise InputValidationError("Couldn't find a mesh of kpoints"
" in the KpointsData")
calc_argsstr = ", ".join( [calc_argsstr] + ["kpts=({},{},{})".format( *mesh )] )
# =============== prepare the methods of atoms.get(), to save results
atoms_getters = default_atoms_getters + convert_the_getters( parameters_dict.pop("atoms_getters",[]) )
# =============== prepare the methods of calculator.get(), to save results
calculator_getters = convert_the_getters( parameters_dict.pop("calculator_getters",[]) )
# ===================== build the strings with the module imports
all_imports = ["import ase", 'import ase.io', "import json",
"import numpy", calculator_import_string]
if optimizer is not None:
all_imports.append(optimizer_import_string)
try:
if "PW" in calc_args['mode'].values():
all_imports.append("from gpaw import PW")
except KeyError:
pass
extra_imports = parameters_dict.pop("extra_imports",[])
for i in extra_imports:
if isinstance(i,basestring):
all_imports.append("import {}".format(i))
elif isinstance(i,(list,tuple)):
if not all( [isinstance(j,basestring) for j in i] ):
raise ValueError("extra import must contain strings")
if len(i)==2:
all_imports.append("from {} import {}".format(*i))
elif len(i)==3:
all_imports.append("from {} import {} as {}".format(*i))
else:
raise ValueError("format for extra imports not recognized")
else:
raise ValueError("format for extra imports not recognized")
if self.get_withmpi():
all_imports.append( "from ase.parallel import paropen" )
all_imports_string = "\n".join(all_imports) + "\n"
# =================== prepare the python script ========================
input_txt = ""
input_txt += get_file_header()
input_txt += "# calculation pk: {}\n".format(self.pk)
input_txt += "\n"
input_txt += all_imports_string
input_txt += "\n"
pre_lines = parameters_dict.pop("pre_lines",None)
if pre_lines is not None:
if not isinstance(pre_lines,(list,tuple)):
raise ValueError("Prelines must be a list of strings")
if not all( [isinstance(_,basestring) for _ in pre_lines] ):
raise ValueError("Prelines must be a list of strings")
input_txt += "\n".join(pre_lines) + "\n\n"
input_txt += "atoms = ase.io.read('{}')\n".format(self._input_aseatoms)
input_txt += "\n"
input_txt += "calculator = custom_calculator({})\n".format(calc_argsstr)
input_txt += "atoms.set_calculator(calculator)\n"
input_txt += "\n"
if optimizer is not None:
# here block the trajectory file name: trajectory = 'aiida.traj'
input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr)
input_txt += "optimizer.run({})\n".format(optimizer_runargsstr)
input_txt += "\n"
# now dump / calculate the results
input_txt += "results = {}\n"
for getter,getter_args in atoms_getters:
input_txt += "results['{}'] = atoms.get_{}({})\n".format(getter,
getter,
getter_args)
input_txt += "\n"
for getter,getter_args in calculator_getters:
input_txt += "results['{}'] = calculator.get_{}({})\n".format(getter,
getter,
getter_args)
input_txt += "\n"
# Convert to lists
input_txt += "for k,v in results.iteritems():\n"
input_txt += " if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n"
input_txt += " results[k] = results[k].tolist()\n"
input_txt += "\n"
post_lines = parameters_dict.pop("post_lines",None)
if post_lines is not None:
if not isinstance(post_lines,(list,tuple)):
raise ValueError("Postlines must be a list of strings")
if not all( [isinstance(_,basestring) for _ in post_lines] ):
raise ValueError("Postlines must be a list of strings")
input_txt += "\n".join(post_lines) + "\n\n"
# Dump results to file
right_open = "paropen" if self.get_withmpi() else "open"
input_txt += "with {}('{}', 'w') as f:\n".format(right_open, self._OUTPUT_FILE_NAME)
input_txt += " json.dump(results,f)"
input_txt += "\n"
# Dump trajectory if present
if optimizer is not None:
input_txt += "atoms.write('{}')\n".format(self._output_aseatoms)
input_txt += "\n"
# write all the input script to a file
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
with open(input_filename,'w') as infile:
infile.write(input_txt)
# ============================ calcinfo ================================
# TODO: look at the qmmm infoL: it might be necessary to put
# some singlefiles in the directory.
# right now it has to be taken care in the pre_lines
local_copy_list = []
remote_copy_list = []
additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
# Empty command line by default
# calcinfo.cmdline_params = settings_dict.pop('CMDLINE', [])
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
codeinfo = CodeInfo()
codeinfo.cmdline_params = [self._INPUT_FILE_NAME]
#calcinfo.stdin_name = self._INPUT_FILE_NAME
codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME
codeinfo.code_uuid = code.uuid
calcinfo.codes_info = [codeinfo]
# Retrieve files
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
calcinfo.retrieve_list.append(self._output_aseatoms)
calcinfo.retrieve_list += additional_retrieve_list
# TODO: I should have two ways of running it: with gpaw-python in parallel
# and executing python if in serial
return calcinfo |
def _router_request(router, method, data=None):
'''
Make a request to the Zenoss API router
'''
if router not in ROUTERS:
return False
req_data = salt.utils.json.dumps([dict(
action=router,
method=method,
data=data,
type='rpc',
tid=1)])
config = __salt__['config.option']('zenoss')
log.debug('Making request to router %s with method %s', router, method)
url = '{0}/zport/dmd/{1}_router'.format(config.get('hostname'), ROUTERS[router])
response = _session().post(url, data=req_data)
# The API returns a 200 response code even when auth is bad.
# With bad auth, the login page is displayed. Here I search for
# an element on the login form to determine if auth failed.
if re.search('name="__ac_name"', response.content):
log.error('Request failed. Bad username/password.')
raise Exception('Request failed. Bad username/password.')
return salt.utils.json.loads(response.content).get('result', None) | Make a request to the Zenoss API router | Below is the instruction that describes the task:
### Input:
Make a request to the Zenoss API router
### Response:
def _router_request(router, method, data=None):
'''
Make a request to the Zenoss API router
'''
if router not in ROUTERS:
return False
req_data = salt.utils.json.dumps([dict(
action=router,
method=method,
data=data,
type='rpc',
tid=1)])
config = __salt__['config.option']('zenoss')
log.debug('Making request to router %s with method %s', router, method)
url = '{0}/zport/dmd/{1}_router'.format(config.get('hostname'), ROUTERS[router])
response = _session().post(url, data=req_data)
# The API returns a 200 response code even when auth is bad.
# With bad auth, the login page is displayed. Here I search for
# an element on the login form to determine if auth failed.
if re.search('name="__ac_name"', response.content):
log.error('Request failed. Bad username/password.')
raise Exception('Request failed. Bad username/password.')
return salt.utils.json.loads(response.content).get('result', None) |
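For orientation, a standalone sketch of the JSON-RPC envelope that `_router_request` posts to a Zenoss router; the router name, method, and arguments here are illustrative only:

```python
import json

# Example payload only; real router/method names come from the ROUTERS mapping and the caller.
req_data = json.dumps([dict(
    action='DeviceRouter',   # router registered in ROUTERS
    method='getDevices',     # method exposed by that router
    data=[{'limit': 50}],    # arguments for the method
    type='rpc',
    tid=1)])
print(req_data)
```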
def _node_to_model(tree_or_item, metadata=None, parent=None,
lucent_id=cnxepub.TRANSLUCENT_BINDER_ID):
"""Given a tree, parse to a set of models"""
if 'contents' in tree_or_item:
# It is a binder.
tree = tree_or_item
binder = cnxepub.TranslucentBinder(metadata=tree)
for item in tree['contents']:
node = _node_to_model(item, parent=binder,
lucent_id=lucent_id)
if node.metadata['title'] != item['title']:
binder.set_title_for_node(node, item['title'])
result = binder
else:
# It is an item pointing at a document.
item = tree_or_item
result = cnxepub.DocumentPointer(item['id'], metadata=item)
if parent is not None:
parent.append(result)
return result | Given a tree, parse to a set of models | Below is the instruction that describes the task:
### Input:
Given a tree, parse to a set of models
### Response:
def _node_to_model(tree_or_item, metadata=None, parent=None,
lucent_id=cnxepub.TRANSLUCENT_BINDER_ID):
"""Given a tree, parse to a set of models"""
if 'contents' in tree_or_item:
# It is a binder.
tree = tree_or_item
binder = cnxepub.TranslucentBinder(metadata=tree)
for item in tree['contents']:
node = _node_to_model(item, parent=binder,
lucent_id=lucent_id)
if node.metadata['title'] != item['title']:
binder.set_title_for_node(node, item['title'])
result = binder
else:
# It is an item pointing at a document.
item = tree_or_item
result = cnxepub.DocumentPointer(item['id'], metadata=item)
if parent is not None:
parent.append(result)
return result |
def get_referenced_object_as_list(
prev_obj, obj, dot_separated_name, desired_type=None):
"""
Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list
"""
res = get_referenced_object(prev_obj, obj, dot_separated_name,
desired_type)
if res is None:
return []
elif type(res) is list:
return res
else:
return [res] | Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list | Below is the instruction that describes the task:
### Input:
Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list
### Response:
def get_referenced_object_as_list(
prev_obj, obj, dot_separated_name, desired_type=None):
"""
Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list
"""
res = get_referenced_object(prev_obj, obj, dot_separated_name,
desired_type)
if res is None:
return []
elif type(res) is list:
return res
else:
return [res] |
def list_available_solvers():
"""Determine available solver interfaces (with python bindings).
Returns
-------
dict
A dict like {'GLPK': True, 'GUROBI': False, ...}
"""
solvers = dict(GUROBI=False, GLPK=False, MOSEK=False, CPLEX=False, SCIPY=False)
try:
import gurobipy
solvers['GUROBI'] = True
log.debug('Gurobi python bindings found at %s' % os.path.dirname(gurobipy.__file__))
except Exception:
log.debug('Gurobi python bindings not available.')
try:
import swiglpk
solvers['GLPK'] = True
log.debug('GLPK python bindings found at %s' % os.path.dirname(swiglpk.__file__))
except Exception:
log.debug('GLPK python bindings not available.')
try:
import mosek
solvers['MOSEK'] = True
log.debug('Mosek python bindings found at %s' % os.path.dirname(mosek.__file__))
except Exception:
log.debug('Mosek python bindings not available.')
try:
import cplex
solvers['CPLEX'] = True
log.debug('CPLEX python bindings found at %s' % os.path.dirname(cplex.__file__))
except Exception:
log.debug('CPLEX python bindings not available.')
try:
from scipy import optimize
optimize.linprog
solvers["SCIPY"] = True
log.debug("Scipy linprog function found at %s" % optimize.__file__)
except (ImportError, AttributeError):
log.debug("Scipy solver not available")
return solvers | Determine available solver interfaces (with python bindings).
Returns
-------
dict
A dict like {'GLPK': True, 'GUROBI': False, ...} | Below is the instruction that describes the task:
### Input:
Determine available solver interfaces (with python bindings).
Returns
-------
dict
A dict like {'GLPK': True, 'GUROBI': False, ...}
### Response:
def list_available_solvers():
"""Determine available solver interfaces (with python bindings).
Returns
-------
dict
A dict like {'GLPK': True, 'GUROBI': False, ...}
"""
solvers = dict(GUROBI=False, GLPK=False, MOSEK=False, CPLEX=False, SCIPY=False)
try:
import gurobipy
solvers['GUROBI'] = True
log.debug('Gurobi python bindings found at %s' % os.path.dirname(gurobipy.__file__))
except Exception:
log.debug('Gurobi python bindings not available.')
try:
import swiglpk
solvers['GLPK'] = True
log.debug('GLPK python bindings found at %s' % os.path.dirname(swiglpk.__file__))
except Exception:
log.debug('GLPK python bindings not available.')
try:
import mosek
solvers['MOSEK'] = True
log.debug('Mosek python bindings found at %s' % os.path.dirname(mosek.__file__))
except Exception:
log.debug('Mosek python bindings not available.')
try:
import cplex
solvers['CPLEX'] = True
log.debug('CPLEX python bindings found at %s' % os.path.dirname(cplex.__file__))
except Exception:
log.debug('CPLEX python bindings not available.')
try:
from scipy import optimize
optimize.linprog
solvers["SCIPY"] = True
log.debug("Scipy linprog function found at %s" % optimize.__file__)
except (ImportError, AttributeError):
log.debug("Scipy solver not available")
return solvers |
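The availability probing above boils down to a try/import per backend. A self-contained sketch of that pattern for a single backend (SciPy), which runs whether or not SciPy is installed:

```python
def scipy_linprog_available():
    # Probe the optional dependency without hard-failing when it is absent.
    try:
        from scipy import optimize
        optimize.linprog  # raises AttributeError on SciPy versions without linprog
        return True
    except (ImportError, AttributeError):
        return False

print({'SCIPY': scipy_linprog_available()})
```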
def cee_map_priority_table_map_cos0_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos0_pgid = ET.SubElement(priority_table, "map-cos0-pgid")
map_cos0_pgid.text = kwargs.pop('map_cos0_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def cee_map_priority_table_map_cos0_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos0_pgid = ET.SubElement(priority_table, "map-cos0-pgid")
map_cos0_pgid.text = kwargs.pop('map_cos0_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
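The auto-generated builder above assembles a small XML document. A hedged sketch with the standard-library ElementTree, using example values for the name and priority-group id:

```python
import xml.etree.ElementTree as ET

# Example values only; the original reads them from **kwargs.
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
ET.SubElement(cee_map, "name").text = "default"
priority_table = ET.SubElement(cee_map, "priority-table")
ET.SubElement(priority_table, "map-cos0-pgid").text = "1"
print(ET.tostring(config, encoding="unicode"))
```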
def _validate_param(rtype, fields):
""" Ensure the sparse fields exist on the models """
try:
# raises ValueError if not found
model = rtype_to_model(rtype)
model_fields = model.all_fields
except ValueError:
raise InvalidQueryParams(**{
'detail': 'The fields query param provided with a '
'field type of "%s" is unknown.' % rtype,
'links': LINK,
'parameter': PARAM,
})
for field in fields:
if field not in model_fields:
raise InvalidQueryParams(**{
'detail': 'The fields query param "TYPE" of "%s" '
'is not possible. It does not have a field '
'by the name of "%s".' % (rtype, field),
'links': LINK,
'parameter': PARAM,
}) | Ensure the sparse fields exist on the models | Below is the instruction that describes the task:
### Input:
Ensure the sparse fields exist on the models
### Response:
def _validate_param(rtype, fields):
""" Ensure the sparse fields exist on the models """
try:
# raises ValueError if not found
model = rtype_to_model(rtype)
model_fields = model.all_fields
except ValueError:
raise InvalidQueryParams(**{
'detail': 'The fields query param provided with a '
'field type of "%s" is unknown.' % rtype,
'links': LINK,
'parameter': PARAM,
})
for field in fields:
if field not in model_fields:
raise InvalidQueryParams(**{
'detail': 'The fields query param "TYPE" of "%s" '
'is not possible. It does not have a field '
'by the name of "%s".' % (rtype, field),
'links': LINK,
'parameter': PARAM,
}) |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(UpdateStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to update (string)."
opt = "expression"
if opt not in options:
options[opt] = "int({X} + 1)"
if opt not in self.help:
self.help[opt] = "The expression for updating the storage value; use {X} for current value (string)."
return options | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | Below is the instruction that describes the task:
### Input:
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
### Response:
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(UpdateStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to update (string)."
opt = "expression"
if opt not in options:
options[opt] = "int({X} + 1)"
if opt not in self.help:
self.help[opt] = "The expression for updating the storage value; use {X} for current value (string)."
return options |
def _ssh_channel_read(ssh_channel_int, count, is_stderr):
"""Do a read on a channel."""
buffer_ = create_string_buffer(count)
while 1:
received_bytes = c_ssh_channel_read(ssh_channel_int,
cast(buffer_, c_void_p),
c_uint32(count),
c_int(int(is_stderr)))
if received_bytes == SSH_ERROR:
ssh_session_int = _ssh_channel_get_session(ssh_channel_int)
error = ssh_get_error(ssh_session_int)
raise SshError("Channel read failed: %s" % (error))
# BUG: We're not using the nonblocking variant, but this can still
# return SSH_AGAIN due to that call's broken dependencies.
# TODO: This call might return SSH_AGAIN, even though we should always be
# blocking. Reported as bug #115.
elif received_bytes == SSH_AGAIN:
continue
else:
break
# TODO: Where is the timeout configured for the read?
return buffer_.raw[0:received_bytes] | Do a read on a channel. | Below is the instruction that describes the task:
### Input:
Do a read on a channel.
### Response:
def _ssh_channel_read(ssh_channel_int, count, is_stderr):
"""Do a read on a channel."""
buffer_ = create_string_buffer(count)
while 1:
received_bytes = c_ssh_channel_read(ssh_channel_int,
cast(buffer_, c_void_p),
c_uint32(count),
c_int(int(is_stderr)))
if received_bytes == SSH_ERROR:
ssh_session_int = _ssh_channel_get_session(ssh_channel_int)
error = ssh_get_error(ssh_session_int)
raise SshError("Channel read failed: %s" % (error))
# BUG: We're not using the nonblocking variant, but this can still
# return SSH_AGAIN due to that call's broken dependencies.
# TODO: This call might return SSH_AGAIN, even though we should always be
# blocking. Reported as bug #115.
elif received_bytes == SSH_AGAIN:
continue
else:
break
# TODO: Where is the timeout configured for the read?
return buffer_.raw[0:received_bytes] |
def save_reg(data):
'''
Save the register to msgpack files
'''
reg_dir = _reg_dir()
regfile = os.path.join(reg_dir, 'register')
try:
if not os.path.exists(reg_dir):
os.makedirs(reg_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
try:
with salt.utils.files.fopen(regfile, 'a') as fh_:
salt.utils.msgpack.dump(data, fh_)
except Exception:
log.error('Could not write to msgpack file %s', __opts__['outdir'])
raise | Save the register to msgpack files | Below is the instruction that describes the task:
### Input:
Save the register to msgpack files
### Response:
def save_reg(data):
'''
Save the register to msgpack files
'''
reg_dir = _reg_dir()
regfile = os.path.join(reg_dir, 'register')
try:
if not os.path.exists(reg_dir):
os.makedirs(reg_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
try:
with salt.utils.files.fopen(regfile, 'a') as fh_:
salt.utils.msgpack.dump(data, fh_)
except Exception:
log.error('Could not write to msgpack file %s', __opts__['outdir'])
raise |
def fromString(cls, string):
"""
Convert a serialized Unicode string to a L{TaskLevel}.
@param string: Output of L{TaskLevel.toString}.
@return: L{TaskLevel} parsed from the string.
"""
return cls(level=[int(i) for i in string.split("/") if i]) | Convert a serialized Unicode string to a L{TaskLevel}.
@param string: Output of L{TaskLevel.toString}.
@return: L{TaskLevel} parsed from the string. | Below is the instruction that describes the task:
### Input:
Convert a serialized Unicode string to a L{TaskLevel}.
@param string: Output of L{TaskLevel.toString}.
@return: L{TaskLevel} parsed from the string.
### Response:
def fromString(cls, string):
"""
Convert a serialized Unicode string to a L{TaskLevel}.
@param string: Output of L{TaskLevel.toString}.
@return: L{TaskLevel} parsed from the string.
"""
return cls(level=[int(i) for i in string.split("/") if i]) |
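The parse above keeps only the non-empty path pieces, so leading and trailing slashes are harmless. A standalone illustration:

```python
# "/2/3/1/" splits into ['', '2', '3', '1', '']; the falsy pieces are filtered out.
string = "/2/3/1/"
level = [int(i) for i in string.split("/") if i]
print(level)  # [2, 3, 1]
```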
def set_simulate(self, status):
"""Set the simulation status.
:param status: Value to set the simulation
:type status: bool
:returns: None
:raises: InvalidInput
"""
if type(status) != bool:
raise InvalidInput("Status value must be bool")
self._simulate = bool2int(status) | Set the simulation status.
:param status: Value to set the simulation
:type status: bool
:returns: None
:raises: InvalidInput | Below is the instruction that describes the task:
### Input:
Set the simulation status.
:param status: Value to set the simulation
:type status: bool
:returns: None
:raises: InvalidInput
### Response:
def set_simulate(self, status):
"""Set the simulation status.
:param status: Value to set the simulation
:type status: bool
:returns: None
:raises: InvalidInput
"""
if type(status) != bool:
raise InvalidInput("Status value must be bool")
self._simulate = bool2int(status) |
def transactional_async(func, args, kwds, **options):
"""The async version of @ndb.transaction."""
options.setdefault('propagation', datastore_rpc.TransactionOptions.ALLOWED)
if args or kwds:
return transaction_async(lambda: func(*args, **kwds), **options)
return transaction_async(func, **options) | The async version of @ndb.transaction. | Below is the instruction that describes the task:
### Input:
The async version of @ndb.transaction.
### Response:
def transactional_async(func, args, kwds, **options):
"""The async version of @ndb.transaction."""
options.setdefault('propagation', datastore_rpc.TransactionOptions.ALLOWED)
if args or kwds:
return transaction_async(lambda: func(*args, **kwds), **options)
return transaction_async(func, **options) |
def _print_errs(self):
"""
Prints the errors trace with tracebacks
"""
i = 0
for error in self.errors:
print(self._errmsg(error, tb=True, i=i))
# for spacing
if self.errs_traceback is False:
print()
i += 1 | Prints the errors trace with tracebacks | Below is the instruction that describes the task:
### Input:
Prints the errors trace with tracebacks
### Response:
def _print_errs(self):
"""
Prints the errors trace with tracebacks
"""
i = 0
for error in self.errors:
print(self._errmsg(error, tb=True, i=i))
# for spacing
if self.errs_traceback is False:
print()
i += 1 |
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
"""
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
"""
byte_contents = BytesIO(contents.encode('utf8'))
info = tarfile.TarInfo(name=name)
info.size = len(contents)
# mtime must be 0 or any checksum operation
# will return a different digest even when content is the same
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8) # permissions converted to decimal notation
tar.addfile(tarinfo=info, fileobj=byte_contents) | Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None | Below is the instruction that describes the task:
### Input:
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
### Response:
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
"""
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
"""
byte_contents = BytesIO(contents.encode('utf8'))
info = tarfile.TarInfo(name=name)
info.size = len(contents)
# mtime must be 0 or any checksum operation
# will return a different digest even when content is the same
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8) # permissions converted to decimal notation
tar.addfile(tarinfo=info, fileobj=byte_contents) |
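A self-contained sketch of the same in-memory tar pattern; pinning `mtime` to 0 is what keeps the archive's checksum stable for identical contents (the file name and contents here are examples):

```python
import tarfile
from io import BytesIO

payload = BytesIO()
with tarfile.open(fileobj=payload, mode="w") as tar:
    contents = "hello\n"
    data = BytesIO(contents.encode("utf8"))
    info = tarfile.TarInfo(name="greeting.txt")
    info.size = len(contents)
    info.mtime = 0                 # fixed timestamp -> deterministic archive bytes
    info.mode = int("644", 8)      # permissions given in octal notation
    tar.addfile(tarinfo=info, fileobj=data)
print(len(payload.getvalue()), "bytes")
```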
def parent(self):
"""Return the parent of the name.
@rtype: dns.name.Name object
@raises NoParent: the name is either the root name or the empty name,
and thus has no parent.
"""
if self == root or self == empty:
raise NoParent
return Name(self.labels[1:]) | Return the parent of the name.
@rtype: dns.name.Name object
@raises NoParent: the name is either the root name or the empty name,
and thus has no parent. | Below is the instruction that describes the task:
### Input:
Return the parent of the name.
@rtype: dns.name.Name object
@raises NoParent: the name is either the root name or the empty name,
and thus has no parent.
### Response:
def parent(self):
"""Return the parent of the name.
@rtype: dns.name.Name object
@raises NoParent: the name is either the root name or the empty name,
and thus has no parent.
"""
if self == root or self == empty:
raise NoParent
return Name(self.labels[1:]) |
def _log_deprecation(self, deprecation_key):
"""
Logs a deprecation notice at most once per AgentCheck instance, for the pre-defined `deprecation_key`
"""
if not self._deprecations[deprecation_key][0]:
self.log.warning(self._deprecations[deprecation_key][1])
self._deprecations[deprecation_key][0] = True | Logs a deprecation notice at most once per AgentCheck instance, for the pre-defined `deprecation_key` | Below is the instruction that describes the task:
### Input:
Logs a deprecation notice at most once per AgentCheck instance, for the pre-defined `deprecation_key`
### Response:
def _log_deprecation(self, deprecation_key):
"""
Logs a deprecation notice at most once per AgentCheck instance, for the pre-defined `deprecation_key`
"""
if not self._deprecations[deprecation_key][0]:
self.log.warning(self._deprecations[deprecation_key][1])
self._deprecations[deprecation_key][0] = True |
def unescape(s, unicode_action="replace"):
"""
Unescape HTML strings, and convert & etc.
"""
import HTMLParser
hp = HTMLParser.HTMLParser()
s = hp.unescape(s)
s = s.encode('ascii', unicode_action)
s = s.replace("\n", "").strip()
return s | Unescape HTML strings, and convert & etc. | Below is the instruction that describes the task:
### Input:
Unescape HTML strings, and convert & etc.
### Response:
def unescape(s, unicode_action="replace"):
"""
Unescape HTML strings, and convert & etc.
"""
import HTMLParser
hp = HTMLParser.HTMLParser()
s = hp.unescape(s)
s = s.encode('ascii', unicode_action)
s = s.replace("\n", "").strip()
return s |
def typing(self, room: Room, timeout: int = 5000):
"""
Send typing event directly to api
Args:
room: room to send typing event to
timeout: timeout for the event, in ms
"""
path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}'
return self.api._send('PUT', path, {'typing': True, 'timeout': timeout}) | Send typing event directly to api
Args:
room: room to send typing event to
timeout: timeout for the event, in ms | Below is the instruction that describes the task:
### Input:
Send typing event directly to api
Args:
room: room to send typing event to
timeout: timeout for the event, in ms
### Response:
def typing(self, room: Room, timeout: int = 5000):
"""
Send typing event directly to api
Args:
room: room to send typing event to
timeout: timeout for the event, in ms
"""
path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}'
return self.api._send('PUT', path, {'typing': True, 'timeout': timeout}) |
def ignore_missing_email_protection_eku_cb(ok, ctx):
"""
For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify().
The latter requires that ExtendedKeyUsage extension, if present,
contains 'emailProtection' OID. (Is it because S/MIME is/was the
primary use case for PKCS7?)
We do not want to fail the verification in this case. At present,
M2Crypto lacks possibility of removing or modifying an existing
extension. Let's assign a custom verification callback.
"""
# The error we want to ignore is indicated by X509_V_ERR_INVALID_PURPOSE.
err = ctx.get_error()
if err != m2.X509_V_ERR_INVALID_PURPOSE:
return ok
# PKCS7_verify() has this requirement only for the signing certificate.
# Do not modify the behavior for certificates upper in the chain.
if ctx.get_error_depth() > 0:
return ok
# There is another cause of ERR_INVALID_PURPOSE: incompatible keyUsage.
# Do not modify the default behavior in this case.
cert = ctx.get_current_cert()
try:
key_usage = cert.get_ext('keyUsage').get_value()
if 'digitalSignature' not in key_usage \
and 'nonRepudiation' not in key_usage:
return ok
except LookupError:
pass
# Here, keyUsage is either absent, or contains the needed bit(s).
# So ERR_INVALID_PURPOSE is caused by EKU not containing 'emailProtection'.
# Ignore this error.
return 1 | For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify().
The latter requires that ExtendedKeyUsage extension, if present,
contains 'emailProtection' OID. (Is it because S/MIME is/was the
primary use case for PKCS7?)
We do not want to fail the verification in this case. At present,
M2Crypto lacks possibility of removing or modifying an existing
extension. Let's assign a custom verification callback. | Below is the instruction that describes the task:
### Input:
For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify().
The latter requires that ExtendedKeyUsage extension, if present,
contains 'emailProtection' OID. (Is it because S/MIME is/was the
primary use case for PKCS7?)
We do not want to fail the verification in this case. At present,
M2Crypto lacks possibility of removing or modifying an existing
extension. Let's assign a custom verification callback.
### Response:
def ignore_missing_email_protection_eku_cb(ok, ctx):
"""
For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify().
The latter requires that ExtendedKeyUsage extension, if present,
contains 'emailProtection' OID. (Is it because S/MIME is/was the
primary use case for PKCS7?)
We do not want to fail the verification in this case. At present,
M2Crypto lacks possibility of removing or modifying an existing
extension. Let's assign a custom verification callback.
"""
# The error we want to ignore is indicated by X509_V_ERR_INVALID_PURPOSE.
err = ctx.get_error()
if err != m2.X509_V_ERR_INVALID_PURPOSE:
return ok
# PKCS7_verify() has this requirement only for the signing certificate.
# Do not modify the behavior for certificates upper in the chain.
if ctx.get_error_depth() > 0:
return ok
# There is another cause of ERR_INVALID_PURPOSE: incompatible keyUsage.
# Do not modify the default behavior in this case.
cert = ctx.get_current_cert()
try:
key_usage = cert.get_ext('keyUsage').get_value()
if 'digitalSignature' not in key_usage \
and 'nonRepudiation' not in key_usage:
return ok
except LookupError:
pass
# Here, keyUsage is either absent, or contains the needed bit(s).
# So ERR_INVALID_PURPOSE is caused by EKU not containing 'emailProtection'.
# Ignore this error.
return 1 |
def hash(self):
"""Return a hash string computed on the PSF data."""
hash_list = []
for key, value in sorted(self.__dict__.items()):
if not callable(value):
if isinstance(value, np.ndarray):
hash_list.append(value.tostring())
else:
hash_list.append(str(value))
return hashlib.md5(repr(hash_list).encode()).hexdigest() | Return a hash string computed on the PSF data. | Below is the instruction that describes the task:
### Input:
Return a hash string computed on the PSF data.
### Response:
def hash(self):
"""Return a hash string computed on the PSF data."""
hash_list = []
for key, value in sorted(self.__dict__.items()):
if not callable(value):
if isinstance(value, np.ndarray):
hash_list.append(value.tostring())
else:
hash_list.append(str(value))
return hashlib.md5(repr(hash_list).encode()).hexdigest() |
def account(self, url):
"""
Return accounts references for the given account id.
:param account_id:
:param accounts_password: The password for decrypting the secret
:return:
"""
from sqlalchemy.orm.exc import NoResultFound
from ambry.orm.exc import NotFoundError
from ambry.util import parse_url_to_dict
from ambry.orm import Account
pd = parse_url_to_dict(url)
# Old method of storing account information.
try:
act = self.database.session.query(Account).filter(Account.account_id == pd['netloc']).one()
act.secret_password = self._account_password
return act
except NoResultFound:
pass
# Try the remotes.
for r in self.remotes:
if url.startswith(r.url):
return r
raise NotFoundError("Did not find account for url: '{}' ".format(url)) | Return accounts references for the given account id.
:param account_id:
:param accounts_password: The password for decrypting the secret
:return: | Below is the instruction that describes the task:
### Input:
Return accounts references for the given account id.
:param account_id:
:param accounts_password: The password for decrypting the secret
:return:
### Response:
def account(self, url):
"""
Return accounts references for the given account id.
:param account_id:
:param accounts_password: The password for decrypting the secret
:return:
"""
from sqlalchemy.orm.exc import NoResultFound
from ambry.orm.exc import NotFoundError
from ambry.util import parse_url_to_dict
from ambry.orm import Account
pd = parse_url_to_dict(url)
# Old method of storing account information.
try:
act = self.database.session.query(Account).filter(Account.account_id == pd['netloc']).one()
act.secret_password = self._account_password
return act
except NoResultFound:
pass
# Try the remotes.
for r in self.remotes:
if url.startswith(r.url):
return r
raise NotFoundError("Did not find account for url: '{}' ".format(url)) |
def store_sample_set(self, md5_list):
""" Store a sample set (which is just a list of md5s).
Note: All md5s must already be in the data store.
Args:
md5_list: a list of the md5s in this set (all must exist in data store)
Returns:
The md5 of the set (the actual md5 of the set)
"""
# Sanity check
if not md5_list:
print 'Warning: Trying to store an empty sample_set'
return None
# Remove any duplicates
md5_list = list(set(md5_list))
for md5 in md5_list:
if not self.has_sample(md5):
raise RuntimeError('%s: Not found! All items in sample_set must be in the datastore' % (md5))
set_md5 = hashlib.md5(str(md5_list)).hexdigest()
self._store_work_results({'md5_list':md5_list}, 'sample_set', set_md5)
return set_md5 | Store a sample set (which is just a list of md5s).
Note: All md5s must already be in the data store.
Args:
md5_list: a list of the md5s in this set (all must exist in data store)
Returns:
The md5 of the set (the actual md5 of the set) | Below is the instruction that describes the task:
### Input:
Store a sample set (which is just a list of md5s).
Note: All md5s must already be in the data store.
Args:
md5_list: a list of the md5s in this set (all must exist in data store)
Returns:
The md5 of the set (the actual md5 of the set)
### Response:
def store_sample_set(self, md5_list):
""" Store a sample set (which is just a list of md5s).
Note: All md5s must already be in the data store.
Args:
md5_list: a list of the md5s in this set (all must exist in data store)
Returns:
The md5 of the set (the actual md5 of the set)
"""
# Sanity check
if not md5_list:
print 'Warning: Trying to store an empty sample_set'
return None
# Remove any duplicates
md5_list = list(set(md5_list))
for md5 in md5_list:
if not self.has_sample(md5):
raise RuntimeError('%s: Not found! All items in sample_set must be in the datastore' % (md5))
set_md5 = hashlib.md5(str(md5_list)).hexdigest()
self._store_work_results({'md5_list':md5_list}, 'sample_set', set_md5)
return set_md5 |
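The set digest above is just the md5 of the stringified, de-duplicated md5 list. A standalone illustration (on Python 3 the string must be encoded first, unlike the Python 2 code in the row):

```python
import hashlib

# Two fake md5s stand in for real sample digests.
md5_list = list(set(["a" * 32, "b" * 32]))
# Note: the digest depends on the ordering of the list that comes out of the set.
set_md5 = hashlib.md5(str(md5_list).encode()).hexdigest()
print(set_md5)
```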
def generate_token(user_id, expire_in=None, data={}, issuer=None, iat=None):
"""Generate a new JWT token for this user_id. Default expiration date
is 1 year from creation time"""
assert user_id, "No user_id passed to generate_token()"
assert isinstance(data, dict), "generate_token(data=) should be a dictionary"
assert get_config().jwt_secret, "No JWT secret configured in pymacaron"
if not issuer:
issuer = get_config().jwt_issuer
assert issuer, "No JWT issuer configured for pymacaron"
if expire_in is None:
expire_in = get_config().jwt_token_timeout
if iat:
epoch_now = iat
else:
epoch_now = to_epoch(timenow())
epoch_end = epoch_now + expire_in
data['iss'] = issuer
data['sub'] = user_id
data['aud'] = get_config().jwt_audience
data['exp'] = epoch_end
data['iat'] = epoch_now
headers = {
"typ": "JWT",
"alg": "HS256",
"iss": issuer,
}
log.debug("Encoding token with data %s and headers %s (secret:%s****)" % (data, headers, get_config().jwt_secret[0:8]))
t = jwt.encode(
data,
get_config().jwt_secret,
headers=headers,
)
if type(t) is bytes:
t = t.decode("utf-8")
return t | Generate a new JWT token for this user_id. Default expiration date
is 1 year from creation time | Below is the the instruction that describes the task:
### Input:
Generate a new JWT token for this user_id. Default expiration date
is 1 year from creation time | Below is the instruction that describes the task:
### Response:
def generate_token(user_id, expire_in=None, data={}, issuer=None, iat=None):
"""Generate a new JWT token for this user_id. Default expiration date
is 1 year from creation time"""
assert user_id, "No user_id passed to generate_token()"
assert isinstance(data, dict), "generate_token(data=) should be a dictionary"
assert get_config().jwt_secret, "No JWT secret configured in pymacaron"
if not issuer:
issuer = get_config().jwt_issuer
assert issuer, "No JWT issuer configured for pymacaron"
if expire_in is None:
expire_in = get_config().jwt_token_timeout
if iat:
epoch_now = iat
else:
epoch_now = to_epoch(timenow())
epoch_end = epoch_now + expire_in
data['iss'] = issuer
data['sub'] = user_id
data['aud'] = get_config().jwt_audience
data['exp'] = epoch_end
data['iat'] = epoch_now
headers = {
"typ": "JWT",
"alg": "HS256",
"iss": issuer,
}
log.debug("Encoding token with data %s and headers %s (secret:%s****)" % (data, headers, get_config().jwt_secret[0:8]))
t = jwt.encode(
data,
get_config().jwt_secret,
headers=headers,
)
if type(t) is bytes:
t = t.decode("utf-8")
return t |
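A hedged sketch of the complementary verification step with the PyJWT library; the secret, claims, and expected values below are illustrative, whereas the real ones come from the pymacaron config used above:

```python
import jwt

secret = "example-secret"
token = jwt.encode({"iss": "example-issuer", "sub": "user-123", "aud": "example-audience"},
                   secret, algorithm="HS256")
# Verification must supply the expected audience/issuer or PyJWT rejects the token.
claims = jwt.decode(token, secret, algorithms=["HS256"],
                    audience="example-audience", issuer="example-issuer")
print(claims["sub"])  # user-123
```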
def currentProfile(self):
"""
Returns the currently selected profile from the system.
:return <XViewProfile>
"""
index = self._profileCombo.currentIndex()
if 0 <= index and index < len(self._profiles):
return self._profiles[index]
:return <XViewProfile> | Below is the instruction that describes the task:
:return <XViewProfile> | Below is the the instruction that describes the task:
### Input:
Returns the currently selected profile from the system.
:return <XViewProfile>
### Response:
def currentProfile(self):
"""
Returns the currently selected profile from the system.
:return <XViewProfile>
"""
index = self._profileCombo.currentIndex()
if 0 <= index and index < len(self._profiles):
return self._profiles[index]
return None |
def get_settings(self, site=None, role=None):
"""
Retrieves the Django settings dictionary.
"""
r = self.local_renderer
_stdout = sys.stdout
_stderr = sys.stderr
if not self.verbose:
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
sys.path.insert(0, r.env.src_dir)
# Temporarily override SITE.
tmp_site = self.genv.SITE
if site and site.endswith('_secure'):
site = site[:-7]
site = site or self.genv.SITE or self.genv.default_site
self.set_site(site)
# Temporarily override ROLE.
tmp_role = self.genv.ROLE
if role:
self.set_role(role)
try:
# We need to explicitly delete sub-modules from sys.modules. Otherwise, reload() skips
# them and they'll continue to contain obsolete settings.
if r.env.delete_module_with_prefixes:
for name in sorted(sys.modules):
for prefix in r.env.delete_module_with_prefixes:
if name.startswith(prefix):
if self.verbose:
print('Deleting module %s prior to re-import.' % name)
del sys.modules[name]
break
for name in list(sys.modules):
for s in r.env.delete_module_containing:
if s in name:
del sys.modules[name]
break
if r.env.settings_module in sys.modules:
del sys.modules[r.env.settings_module]
#TODO:fix r.env.settings_module not loading from settings?
# print('r.genv.django_settings_module:', r.genv.django_settings_module, file=_stdout)
# print('r.genv.dj_settings_module:', r.genv.dj_settings_module, file=_stdout)
# print('r.env.settings_module:', r.env.settings_module, file=_stdout)
if 'django_settings_module' in r.genv:
r.env.settings_module = r.genv.django_settings_module
else:
r.env.settings_module = r.env.settings_module or r.genv.dj_settings_module
if self.verbose:
print('r.env.settings_module:', r.env.settings_module, r.format(r.env.settings_module))
module = import_module(r.format(r.env.settings_module))
if site:
assert site == module.SITE, 'Unable to set SITE to "%s" Instead it is set to "%s".' % (site, module.SITE)
# Works as long as settings.py doesn't also reload anything.
import imp
imp.reload(module)
except ImportError as e:
print('Warning: Could not import settings for site "%s": %s' % (site, e), file=_stdout)
traceback.print_exc(file=_stdout)
#raise # breaks *_secure pseudo sites
return
finally:
if tmp_site:
self.set_site(tmp_site)
if tmp_role:
self.set_role(tmp_role)
finally:
sys.stdout = _stdout
sys.stderr = _stderr
sys.path.remove(r.env.src_dir)
return module | Retrieves the Django settings dictionary. | Below is the instruction that describes the task:
### Input:
Retrieves the Django settings dictionary.
### Response:
def get_settings(self, site=None, role=None):
"""
Retrieves the Django settings dictionary.
"""
r = self.local_renderer
_stdout = sys.stdout
_stderr = sys.stderr
if not self.verbose:
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
sys.path.insert(0, r.env.src_dir)
# Temporarily override SITE.
tmp_site = self.genv.SITE
if site and site.endswith('_secure'):
site = site[:-7]
site = site or self.genv.SITE or self.genv.default_site
self.set_site(site)
# Temporarily override ROLE.
tmp_role = self.genv.ROLE
if role:
self.set_role(role)
try:
# We need to explicitly delete sub-modules from sys.modules. Otherwise, reload() skips
# them and they'll continue to contain obsolete settings.
if r.env.delete_module_with_prefixes:
for name in sorted(sys.modules):
for prefix in r.env.delete_module_with_prefixes:
if name.startswith(prefix):
if self.verbose:
print('Deleting module %s prior to re-import.' % name)
del sys.modules[name]
break
for name in list(sys.modules):
for s in r.env.delete_module_containing:
if s in name:
del sys.modules[name]
break
if r.env.settings_module in sys.modules:
del sys.modules[r.env.settings_module]
#TODO:fix r.env.settings_module not loading from settings?
# print('r.genv.django_settings_module:', r.genv.django_settings_module, file=_stdout)
# print('r.genv.dj_settings_module:', r.genv.dj_settings_module, file=_stdout)
# print('r.env.settings_module:', r.env.settings_module, file=_stdout)
if 'django_settings_module' in r.genv:
r.env.settings_module = r.genv.django_settings_module
else:
r.env.settings_module = r.env.settings_module or r.genv.dj_settings_module
if self.verbose:
print('r.env.settings_module:', r.env.settings_module, r.format(r.env.settings_module))
module = import_module(r.format(r.env.settings_module))
if site:
assert site == module.SITE, 'Unable to set SITE to "%s" Instead it is set to "%s".' % (site, module.SITE)
# Works as long as settings.py doesn't also reload anything.
import imp
imp.reload(module)
except ImportError as e:
print('Warning: Could not import settings for site "%s": %s' % (site, e), file=_stdout)
traceback.print_exc(file=_stdout)
#raise # breaks *_secure pseudo sites
return
finally:
if tmp_site:
self.set_site(tmp_site)
if tmp_role:
self.set_role(tmp_role)
finally:
sys.stdout = _stdout
sys.stderr = _stderr
sys.path.remove(r.env.src_dir)
return module |
def printSegmentUpdates(self):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`.
"""
# TODO: need to add C++ accessors to implement this method
assert False
print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
for key, updateList in self.segmentUpdates.iteritems():
c,i = key[0],key[1]
print c,i,updateList | Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`. | Below is the instruction that describes the task:
### Input:
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`.
### Response:
def printSegmentUpdates(self):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`.
"""
# TODO: need to add C++ accessors to implement this method
assert False
print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
for key, updateList in self.segmentUpdates.iteritems():
c,i = key[0],key[1]
print c,i,updateList |
def print_profile(function):
'''
Decorator that prints memory and runtime information at each call of the function
'''
import memory_profiler
def wrapper(*args,**kwargs):
m=StringIO()
pr=cProfile.Profile()
pr.enable()
temp_func = memory_profiler.profile(func=function,stream=m,precision=4)
output = temp_func(*args,**kwargs)
print(m.getvalue())
pr.disable()
ps = pstats.Stats(pr)
ps.sort_stats('cumulative').print_stats('(?!.*memory_profiler.*)(^.*$)',20)
m.close()
return output
return wrapper | Decorator that prints memory and runtime information at each call of the function | Below is the instruction that describes the task:
### Input:
Decorator that prints memory and runtime information at each call of the function
### Response:
def print_profile(function):
'''
Decorator that prints memory and runtime information at each call of the function
'''
import memory_profiler
def wrapper(*args,**kwargs):
m=StringIO()
pr=cProfile.Profile()
pr.enable()
temp_func = memory_profiler.profile(func=function,stream=m,precision=4)
output = temp_func(*args,**kwargs)
print(m.getvalue())
pr.disable()
ps = pstats.Stats(pr)
ps.sort_stats('cumulative').print_stats('(?!.*memory_profiler.*)(^.*$)',20)
m.close()
return output
return wrapper |
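A usage sketch, assuming memory_profiler is installed and that print_profile's module also imports cProfile, pstats and StringIO at module level, as the body implies:
@print_profile
def crunch(n=10 ** 5):
    # throwaway work so both profilers have something to measure
    return sum(i * i for i in range(n))

crunch()  # prints the line-by-line memory report, then the cProfile summary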
def assign_from_subscribed(self, assignments):
"""Update the assignment to the specified partitions
This method is called by the coordinator to dynamically assign
partitions based on the consumer's topic subscription. This is different
from assign_from_user() which directly sets the assignment from a
user-supplied TopicPartition list.
Arguments:
assignments (list of TopicPartition): partitions to assign to this
consumer instance.
"""
if not self.partitions_auto_assigned():
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
for tp in assignments:
if tp.topic not in self.subscription:
raise ValueError("Assigned partition %s for non-subscribed topic." % (tp,))
# after rebalancing, we always reinitialize the assignment state
self.assignment.clear()
for tp in assignments:
self._add_assigned_partition(tp)
self.needs_fetch_committed_offsets = True
log.info("Updated partition assignment: %s", assignments) | Update the assignment to the specified partitions
This method is called by the coordinator to dynamically assign
partitions based on the consumer's topic subscription. This is different
from assign_from_user() which directly sets the assignment from a
user-supplied TopicPartition list.
Arguments:
assignments (list of TopicPartition): partitions to assign to this
consumer instance. | Below is the instruction that describes the task:
### Input:
Update the assignment to the specified partitions
This method is called by the coordinator to dynamically assign
partitions based on the consumer's topic subscription. This is different
from assign_from_user() which directly sets the assignment from a
user-supplied TopicPartition list.
Arguments:
assignments (list of TopicPartition): partitions to assign to this
consumer instance.
### Response:
def assign_from_subscribed(self, assignments):
"""Update the assignment to the specified partitions
This method is called by the coordinator to dynamically assign
partitions based on the consumer's topic subscription. This is different
from assign_from_user() which directly sets the assignment from a
user-supplied TopicPartition list.
Arguments:
assignments (list of TopicPartition): partitions to assign to this
consumer instance.
"""
if not self.partitions_auto_assigned():
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
for tp in assignments:
if tp.topic not in self.subscription:
raise ValueError("Assigned partition %s for non-subscribed topic." % (tp,))
# after rebalancing, we always reinitialize the assignment state
self.assignment.clear()
for tp in assignments:
self._add_assigned_partition(tp)
self.needs_fetch_committed_offsets = True
log.info("Updated partition assignment: %s", assignments) |
def index(self, row, col, parent=QtCore.QModelIndex()):
"""Creates an index. An item must exist for the given *row*
and *col*
:returns: :qtdoc:`QModelIndex`
"""
if row < self._stim.rowCount() and col < self._stim.columnCountForRow(row):
component = self._stim.component(row, col)
return self.createIndex(row, col, component)
else:
return QtCore.QModelIndex() | Creates an index. An item must exist for the given *row*
and *col*
:returns: :qtdoc:`QModelIndex` | Below is the instruction that describes the task:
### Input:
Creates an index. An item must exist for the given *row*
and *col*
:returns: :qtdoc:`QModelIndex`
### Response:
def index(self, row, col, parent=QtCore.QModelIndex()):
"""Creates an index. An item must exist for the given *row*
and *col*
:returns: :qtdoc:`QModelIndex`
"""
if row < self._stim.rowCount() and col < self._stim.columnCountForRow(row):
component = self._stim.component(row, col)
return self.createIndex(row, col, component)
else:
return QtCore.QModelIndex() |
def smart_search_vrf(self):
""" Perform a smart VRF search.
The "smart" search function tries to extract a query from
a text string. This query is then passed to the search_vrf
function, which performs the search.
"""
search_options = {}
extra_query = None
if 'query_id' in request.json:
search_options['query_id'] = request.json['query_id']
if 'max_result' in request.json:
search_options['max_result'] = request.json['max_result']
if 'offset' in request.json:
search_options['offset'] = request.json['offset']
if 'vrf_id' in request.json:
extra_query = {
'val1': 'id',
'operator': 'equals',
'val2': request.json['vrf_id']
}
try:
result = VRF.smart_search(request.json['query_string'],
search_options, extra_query
)
# Remove error key in result from backend as it interferes with the
# error handling of the web interface.
# TODO: Reevaluate how to deal with different types of errors; soft
# errors like query string parser errors and hard errors like lost
# database.
del result['error']
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(result, cls=NipapJSONEncoder) | Perform a smart VRF search.
The "smart" search function tries to extract a query from
a text string. This query is then passed to the search_vrf
function, which performs the search. | Below is the instruction that describes the task:
### Input:
Perform a smart VRF search.
The "smart" search function tries to extract a query from
a text string. This query is then passed to the search_vrf
function, which performs the search.
### Response:
def smart_search_vrf(self):
""" Perform a smart VRF search.
The "smart" search function tries to extract a query from
a text string. This query is then passed to the search_vrf
function, which performs the search.
"""
search_options = {}
extra_query = None
if 'query_id' in request.json:
search_options['query_id'] = request.json['query_id']
if 'max_result' in request.json:
search_options['max_result'] = request.json['max_result']
if 'offset' in request.json:
search_options['offset'] = request.json['offset']
if 'vrf_id' in request.json:
extra_query = {
'val1': 'id',
'operator': 'equals',
'val2': request.json['vrf_id']
}
try:
result = VRF.smart_search(request.json['query_string'],
search_options, extra_query
)
# Remove error key in result from backend as it interferes with the
# error handling of the web interface.
# TODO: Reevaluate how to deal with different types of errors; soft
# errors like query string parser errors and hard errors like lost
# database.
del result['error']
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(result, cls=NipapJSONEncoder) |
def serveWeek(self, request, year=None, week=None):
"""Weekly calendar view."""
myurl = self.get_url(request)
def myUrl(urlYear, urlWeek):
if (urlYear < 1900 or
urlYear > 2099 or
urlYear == 2099 and urlWeek == 53):
return None
if urlWeek == 53 and num_weeks_in_year(urlYear) == 52:
urlWeek = 52
return myurl + self.reverse_subpage('serveWeek',
args=[urlYear, urlWeek])
today = timezone.localdate()
thisYear, thisWeekNum, dow = gregorian_to_week_date(today)
if year is None: year = thisYear
if week is None: week = thisWeekNum
year = int(year)
week = int(week)
firstDay, lastDay, prevYearNumWeeks, yearNumWeeks = week_info(year, week)
if week == 53 and yearNumWeeks == 52:
raise Http404("Only 52 weeks in {}".format(year))
eventsInWeek = self._getEventsByDay(request, firstDay, lastDay)
if firstDay.year >= 1900:
monthlyUrl = myurl + self.reverse_subpage('serveMonth',
args=[firstDay.year, firstDay.month])
else:
monthlyUrl = myurl + self.reverse_subpage('serveMonth', args=[1900, 1])
listUrl = myurl + self.reverse_subpage('serveUpcoming')
prevWeek = week - 1
prevWeekYear = year
if prevWeek == 0:
prevWeek = prevYearNumWeeks
prevWeekYear -= 1
nextWeek = week + 1
nextWeekYear = year
if nextWeek > yearNumWeeks:
nextWeek = 1
nextWeekYear += 1
# TODO Consider changing to a TemplateResponse
# https://stackoverflow.com/questions/38838601
return render(request, "joyous/calendar_week.html",
{'self': self,
'page': self,
'version': __version__,
'year': year,
'week': week,
'today': today,
'yesterday': today - dt.timedelta(1),
'prevWeekUrl': myUrl(prevWeekYear, prevWeek),
'nextWeekUrl': myUrl(nextWeekYear, nextWeek),
'prevYearUrl': myUrl(year - 1, week),
'nextYearUrl': myUrl(year + 1, week),
'thisWeekUrl': myUrl(thisYear, thisWeekNum),
'monthlyUrl': monthlyUrl,
'listUrl': listUrl,
'weekName': _("Week {weekNum}").format(weekNum=week),
'weekdayAbbr': weekday_abbr,
'events': [eventsInWeek]}) | Weekly calendar view. | Below is the instruction that describes the task:
### Input:
Weekly calendar view.
### Response:
def serveWeek(self, request, year=None, week=None):
"""Weekly calendar view."""
myurl = self.get_url(request)
def myUrl(urlYear, urlWeek):
if (urlYear < 1900 or
urlYear > 2099 or
urlYear == 2099 and urlWeek == 53):
return None
if urlWeek == 53 and num_weeks_in_year(urlYear) == 52:
urlWeek = 52
return myurl + self.reverse_subpage('serveWeek',
args=[urlYear, urlWeek])
today = timezone.localdate()
thisYear, thisWeekNum, dow = gregorian_to_week_date(today)
if year is None: year = thisYear
if week is None: week = thisWeekNum
year = int(year)
week = int(week)
firstDay, lastDay, prevYearNumWeeks, yearNumWeeks = week_info(year, week)
if week == 53 and yearNumWeeks == 52:
raise Http404("Only 52 weeks in {}".format(year))
eventsInWeek = self._getEventsByDay(request, firstDay, lastDay)
if firstDay.year >= 1900:
monthlyUrl = myurl + self.reverse_subpage('serveMonth',
args=[firstDay.year, firstDay.month])
else:
monthlyUrl = myurl + self.reverse_subpage('serveMonth', args=[1900, 1])
listUrl = myurl + self.reverse_subpage('serveUpcoming')
prevWeek = week - 1
prevWeekYear = year
if prevWeek == 0:
prevWeek = prevYearNumWeeks
prevWeekYear -= 1
nextWeek = week + 1
nextWeekYear = year
if nextWeek > yearNumWeeks:
nextWeek = 1
nextWeekYear += 1
# TODO Consider changing to a TemplateResponse
# https://stackoverflow.com/questions/38838601
return render(request, "joyous/calendar_week.html",
{'self': self,
'page': self,
'version': __version__,
'year': year,
'week': week,
'today': today,
'yesterday': today - dt.timedelta(1),
'prevWeekUrl': myUrl(prevWeekYear, prevWeek),
'nextWeekUrl': myUrl(nextWeekYear, nextWeek),
'prevYearUrl': myUrl(year - 1, week),
'nextYearUrl': myUrl(year + 1, week),
'thisWeekUrl': myUrl(thisYear, thisWeekNum),
'monthlyUrl': monthlyUrl,
'listUrl': listUrl,
'weekName': _("Week {weekNum}").format(weekNum=week),
'weekdayAbbr': weekday_abbr,
'events': [eventsInWeek]}) |
def _construct_as_path_attr(self, as_path_attr, as4_path_attr):
"""Merge AS_PATH and AS4_PATH attribute instances into
a single AS_PATH instance."""
def _listify(li):
"""Reconstruct AS_PATH list.
Example::
>>> _listify([[1, 2, 3], {4, 5}, [6, 7]])
[1, 2, 3, {4, 5}, 6, 7]
"""
lo = []
for l in li:
if isinstance(l, list):
lo.extend(l)
elif isinstance(l, set):
lo.append(l)
else:
pass
return lo
# If AS4_PATH attribute is None, returns the given AS_PATH attribute
if as4_path_attr is None:
return as_path_attr
# If AS_PATH is shorter than AS4_PATH, AS4_PATH should be ignored.
if as_path_attr.get_as_path_len() < as4_path_attr.get_as_path_len():
return as_path_attr
org_as_path_list = _listify(as_path_attr.path_seg_list)
as4_path_list = _listify(as4_path_attr.path_seg_list)
# Reverse to compare backward.
org_as_path_list.reverse()
as4_path_list.reverse()
new_as_path_list = []
tmp_list = []
for as_path, as4_path in zip_longest(org_as_path_list, as4_path_list):
if as4_path is None:
if isinstance(as_path, int):
tmp_list.insert(0, as_path)
elif isinstance(as_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as_path)
else:
pass
elif isinstance(as4_path, int):
tmp_list.insert(0, as4_path)
elif isinstance(as4_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as4_path)
else:
pass
if tmp_list:
new_as_path_list.insert(0, tmp_list)
return bgp.BGPPathAttributeAsPath(new_as_path_list) | Merge AS_PATH and AS4_PATH attribute instances into
a single AS_PATH instance. | Below is the instruction that describes the task:
### Input:
Merge AS_PATH and AS4_PATH attribute instances into
a single AS_PATH instance.
### Response:
def _construct_as_path_attr(self, as_path_attr, as4_path_attr):
"""Merge AS_PATH and AS4_PATH attribute instances into
a single AS_PATH instance."""
def _listify(li):
"""Reconstruct AS_PATH list.
Example::
>>> _listify([[1, 2, 3], {4, 5}, [6, 7]])
[1, 2, 3, {4, 5}, 6, 7]
"""
lo = []
for l in li:
if isinstance(l, list):
lo.extend(l)
elif isinstance(l, set):
lo.append(l)
else:
pass
return lo
# If AS4_PATH attribute is None, returns the given AS_PATH attribute
if as4_path_attr is None:
return as_path_attr
# If AS_PATH is shorter than AS4_PATH, AS4_PATH should be ignored.
if as_path_attr.get_as_path_len() < as4_path_attr.get_as_path_len():
return as_path_attr
org_as_path_list = _listify(as_path_attr.path_seg_list)
as4_path_list = _listify(as4_path_attr.path_seg_list)
# Reverse to compare backward.
org_as_path_list.reverse()
as4_path_list.reverse()
new_as_path_list = []
tmp_list = []
for as_path, as4_path in zip_longest(org_as_path_list, as4_path_list):
if as4_path is None:
if isinstance(as_path, int):
tmp_list.insert(0, as_path)
elif isinstance(as_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as_path)
else:
pass
elif isinstance(as4_path, int):
tmp_list.insert(0, as4_path)
elif isinstance(as4_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as4_path)
else:
pass
if tmp_list:
new_as_path_list.insert(0, tmp_list)
return bgp.BGPPathAttributeAsPath(new_as_path_list) |
def encode(self, *values):
"""Builds a hash from the passed `values`.
:param values: The values to transform into a hashid
>>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
>>> hashids.encode(1, 23, 456)
'1d6216i30h53elk3'
"""
if not (values and all(_is_uint(x) for x in values)):
return ''
return _encode(values, self._salt, self._min_length, self._alphabet,
self._separators, self._guards) | Builds a hash from the passed `values`.
:param values: The values to transform into a hashid
>>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
>>> hashids.encode(1, 23, 456)
'1d6216i30h53elk3' | Below is the instruction that describes the task:
### Input:
Builds a hash from the passed `values`.
:param values: The values to transform into a hashid
>>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
>>> hashids.encode(1, 23, 456)
'1d6216i30h53elk3'
### Response:
def encode(self, *values):
"""Builds a hash from the passed `values`.
:param values: The values to transform into a hashid
>>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
>>> hashids.encode(1, 23, 456)
'1d6216i30h53elk3'
"""
if not (values and all(_is_uint(x) for x in values)):
return ''
return _encode(values, self._salt, self._min_length, self._alphabet,
self._separators, self._guards) |
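A round-trip sketch, assuming the hashids package is installed; decode() from the same library reverses encode(), and the guard clause above means empty or non-unsigned inputs yield an empty string:
from hashids import Hashids

hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
hashid = hashids.encode(1, 23, 456)
assert hashids.decode(hashid) == (1, 23, 456)
assert hashids.encode() == ''    # no values at all
assert hashids.encode(-1) == ''  # negative values fail the _is_uint check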
def epcr_report(self):
"""
Create a report of the ePCR-calculated toxin profiles
"""
logging.info('Creating {at} report'.format(at=self.analysistype))
with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report:
data = 'Strain,ToxinProfile\n'
for sample in self.metadata:
data += '{sn},{tp}\n'.format(sn=sample.name,
tp=sample[self.analysistype].toxinprofile)
# Write the data to the report
report.write(data) | Create a report of the ePCR-calculated toxin profiles | Below is the instruction that describes the task:
### Input:
Create a report of the ePCR-calculated toxin profiles
### Response:
def epcr_report(self):
"""
Create a report of the ePCR-calculated toxin profiles
"""
logging.info('Creating {at} report'.format(at=self.analysistype))
with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report:
data = 'Strain,ToxinProfile\n'
for sample in self.metadata:
data += '{sn},{tp}\n'.format(sn=sample.name,
tp=sample[self.analysistype].toxinprofile)
# Write the data to the report
report.write(data) |
def find_by_uuid(self, uuid):
"""Find an entry by uuid.
:raise: EntryNotFoundError
"""
for entry in self.entries:
if entry.uuid == uuid:
return entry
raise EntryNotFoundError("Entry not found for uuid: %s" % uuid) | Find an entry by uuid.
:raise: EntryNotFoundError | Below is the instruction that describes the task:
### Input:
Find an entry by uuid.
:raise: EntryNotFoundError
### Response:
def find_by_uuid(self, uuid):
"""Find an entry by uuid.
:raise: EntryNotFoundError
"""
for entry in self.entries:
if entry.uuid == uuid:
return entry
raise EntryNotFoundError("Entry not found for uuid: %s" % uuid) |
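A typical call-site pattern; db and uuid are hypothetical stand-ins for an instance of the containing class and a real identifier:
try:
    entry = db.find_by_uuid(uuid)
except EntryNotFoundError:
    entry = None  # decide how a missing entry should be handled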
def createLoadableModuleBuilder(env):
"""This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction,
LibSymlinksAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = LdModPrefixGenerator,
suffix = LdModSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module | This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one. | Below is the instruction that describes the task:
### Input:
This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
### Response:
def createLoadableModuleBuilder(env):
"""This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction,
LibSymlinksAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = LdModPrefixGenerator,
suffix = LdModSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module |
def validate_unit(input_unit):
"""Validate unit.
To be compatible with existing SYNPHOT data files:
* 'angstroms' and 'inversemicrons' are accepted although
unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are
converted to astropy dimensionless unit
Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
Unit to validate.
Returns
-------
output_unit : `~astropy.units.core.Unit`
Validated unit.
Raises
------
synphot.exceptions.SynphotError
Invalid unit.
"""
if isinstance(input_unit, str):
input_unit_lowcase = input_unit.lower()
# Backward-compatibility
if input_unit_lowcase == 'angstroms':
output_unit = u.AA
elif input_unit_lowcase == 'inversemicrons':
output_unit = u.micron ** -1
elif input_unit_lowcase in ('transmission', 'extinction',
'emissivity'):
output_unit = THROUGHPUT
elif input_unit_lowcase == 'jy':
output_unit = u.Jy
# Work around mag unit limitations
elif input_unit_lowcase in ('stmag', 'mag(st)'):
output_unit = u.STmag
elif input_unit_lowcase in ('abmag', 'mag(ab)'):
output_unit = u.ABmag
else:
try: # astropy.units is case-sensitive
output_unit = u.Unit(input_unit)
except ValueError: # synphot is case-insensitive
output_unit = u.Unit(input_unit_lowcase)
elif isinstance(input_unit, (u.UnitBase, u.LogUnit)):
output_unit = input_unit
else:
raise exceptions.SynphotError(
'{0} must be a recognized string or '
'astropy.units.core.Unit'.format(input_unit))
return output_unit | Validate unit.
To be compatible with existing SYNPHOT data files:
* 'angstroms' and 'inversemicrons' are accepted although
unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are
converted to astropy dimensionless unit
Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
Unit to validate.
Returns
-------
output_unit : `~astropy.units.core.Unit`
Validated unit.
Raises
------
synphot.exceptions.SynphotError
Invalid unit. | Below is the instruction that describes the task:
### Input:
Validate unit.
To be compatible with existing SYNPHOT data files:
* 'angstroms' and 'inversemicrons' are accepted although
unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are
converted to astropy dimensionless unit
Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
Unit to validate.
Returns
-------
output_unit : `~astropy.units.core.Unit`
Validated unit.
Raises
------
synphot.exceptions.SynphotError
Invalid unit.
### Response:
def validate_unit(input_unit):
"""Validate unit.
To be compatible with existing SYNPHOT data files:
* 'angstroms' and 'inversemicrons' are accepted although
unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are
converted to astropy dimensionless unit
Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
Unit to validate.
Returns
-------
output_unit : `~astropy.units.core.Unit`
Validated unit.
Raises
------
synphot.exceptions.SynphotError
Invalid unit.
"""
if isinstance(input_unit, str):
input_unit_lowcase = input_unit.lower()
# Backward-compatibility
if input_unit_lowcase == 'angstroms':
output_unit = u.AA
elif input_unit_lowcase == 'inversemicrons':
output_unit = u.micron ** -1
elif input_unit_lowcase in ('transmission', 'extinction',
'emissivity'):
output_unit = THROUGHPUT
elif input_unit_lowcase == 'jy':
output_unit = u.Jy
# Work around mag unit limitations
elif input_unit_lowcase in ('stmag', 'mag(st)'):
output_unit = u.STmag
elif input_unit_lowcase in ('abmag', 'mag(ab)'):
output_unit = u.ABmag
else:
try: # astropy.units is case-sensitive
output_unit = u.Unit(input_unit)
except ValueError: # synphot is case-insensitive
output_unit = u.Unit(input_unit_lowcase)
elif isinstance(input_unit, (u.UnitBase, u.LogUnit)):
output_unit = input_unit
else:
raise exceptions.SynphotError(
'{0} must be a recognized string or '
'astropy.units.core.Unit'.format(input_unit))
return output_unit |
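A few illustrative calls, assuming astropy is installed and validate_unit (plus its module-level THROUGHPUT and exceptions imports) is in scope; the legacy SYNPHOT strings map onto ordinary astropy units:
import astropy.units as u

assert validate_unit('angstroms') == u.AA
assert validate_unit('Jy') == u.Jy                      # case-insensitive
assert validate_unit('inversemicrons') == u.micron ** -1
assert validate_unit(u.km) is u.km                      # Unit instances pass through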
def get(self, url, params=None, **kwargs):
""" Shorthand for self.oauth_request(url, 'get')
:param str url: url to send get oauth request to
:param dict params: request parameter to get the service data
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'get', params=params, **kwargs) | Shorthand for self.oauth_request(url, 'get')
:param str url: url to send get oauth request to
:param dict params: request parameter to get the service data
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response | Below is the instruction that describes the task:
### Input:
Shorthand for self.oauth_request(url, 'get')
:param str url: url to send get oauth request to
:param dict params: request parameter to get the service data
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
### Response:
def get(self, url, params=None, **kwargs):
""" Shorthand for self.oauth_request(url, 'get')
:param str url: url to send get oauth request to
:param dict params: request parameter to get the service data
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'get', params=params, **kwargs) |
def temporal_network(gtfs,
start_time_ut=None,
end_time_ut=None,
route_type=None):
"""
Compute the temporal network of the data, and return it as a pandas.DataFrame
Parameters
----------
gtfs : gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
route_type: int | None
Specifies which mode of public transport is included, or whether all modes should be included.
The int should be one of the standard GTFS route_types:
(see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
If route_type is not specified, all modes are included.
Returns
-------
events_df: pandas.DataFrame
Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I
"""
events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
end_time_ut=end_time_ut,
route_type=route_type)
events_df.drop('to_seq', 1, inplace=True)
events_df.drop('shape_id', 1, inplace=True)
events_df.drop('duration', 1, inplace=True)
events_df.drop('route_id', 1, inplace=True)
events_df.rename(
columns={
'from_seq': "seq"
},
inplace=True
)
return events_df | Compute the temporal network of the data, and return it as a pandas.DataFrame
Parameters
----------
gtfs : gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
route_type: int | None
Specifies which mode of public transport is included, or whether all modes should be included.
The int should be one of the standard GTFS route_types:
(see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
If route_type is not specified, all modes are included.
Returns
-------
events_df: pandas.DataFrame
Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I | Below is the instruction that describes the task:
### Input:
Compute the temporal network of the data, and return it as a pandas.DataFrame
Parameters
----------
gtfs : gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
route_type: int | None
Specifies which mode of public transport is included, or whether all modes should be included.
The int should be one of the standard GTFS route_types:
(see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
If route_type is not specified, all modes are included.
Returns
-------
events_df: pandas.DataFrame
Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I
### Response:
def temporal_network(gtfs,
start_time_ut=None,
end_time_ut=None,
route_type=None):
"""
Compute the temporal network of the data, and return it as a pandas.DataFrame
Parameters
----------
gtfs : gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
route_type: int | None
Specifies which mode of public transport is included, or whether all modes should be included.
The int should be one of the standard GTFS route_types:
(see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
If route_type is not specified, all modes are included.
Returns
-------
events_df: pandas.DataFrame
Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I
"""
events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
end_time_ut=end_time_ut,
route_type=route_type)
events_df.drop('to_seq', 1, inplace=True)
events_df.drop('shape_id', 1, inplace=True)
events_df.drop('duration', 1, inplace=True)
events_df.drop('route_id', 1, inplace=True)
events_df.rename(
columns={
'from_seq': "seq"
},
inplace=True
)
return events_df |
def binom(n, k):
"""
Returns binomial coefficient (n choose k).
"""
# http://blog.plover.com/math/choose.html
if k > n:
return 0
if k == 0:
return 1
result = 1
for denom in range(1, k + 1):
result *= n
result /= denom
n -= 1
return result | Returns binomial coefficient (n choose k). | Below is the instruction that describes the task:
### Input:
Returns binomial coefficient (n choose k).
### Response:
def binom(n, k):
"""
Returns binomial coefficient (n choose k).
"""
# http://blog.plover.com/math/choose.html
if k > n:
return 0
if k == 0:
return 1
result = 1
for denom in range(1, k + 1):
result *= n
result /= denom
n -= 1
return result |
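A quick sanity check against the standard library (math.comb, Python 3.8+), assuming the binom above is in scope; note that the running division makes the result a float whenever k > 0:
import math

for n, k in [(5, 2), (10, 0), (4, 6), (52, 5)]:
    assert binom(n, k) == math.comb(n, k)
assert isinstance(binom(5, 2), float)  # 10.0 rather than 10, because of result /= denom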
def from_inches(value, units):
"""
Convert value in inches to given units
Parameters
----------
value : float
Value to be converted
units : str
Units to convert value to. Must be one of
`['in', 'cm', 'mm']`.
"""
lookup = {'in': lambda x: x,
'cm': lambda x: x*2.54,
'mm': lambda x: x*2.54*10}
try:
return lookup[units](value)
except KeyError:
raise PlotnineError("Unknown units '{}'".format(units)) | Convert value in inches to given units
Parameters
----------
value : float
Value to be converted
units : str
Units to convert value to. Must be one of
`['in', 'cm', 'mm']`. | Below is the instruction that describes the task:
### Input:
Convert value in inches to given units
Parameters
----------
value : float
Value to be converted
units : str
Units to convert value to. Must be one of
`['in', 'cm', 'mm']`.
### Response:
def from_inches(value, units):
"""
Convert value in inches to given units
Parameters
----------
value : float
Value to be converted
units : str
Units to convert value to. Must be one of
`['in', 'cm', 'mm']`.
"""
lookup = {'in': lambda x: x,
'cm': lambda x: x*2.54,
'mm': lambda x: x*2.54*10}
try:
return lookup[units](value)
except KeyError:
raise PlotnineError("Unknown units '{}'".format(units)) |
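For example, converting a one-inch measurement, assuming from_inches and PlotnineError are in scope; math.isclose guards against floating-point noise:
import math

assert from_inches(1, 'in') == 1
assert from_inches(1, 'cm') == 2.54
assert math.isclose(from_inches(1, 'mm'), 25.4)
# from_inches(1, 'pt') raises PlotnineError("Unknown units 'pt'")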
def average_patterson_d(aca, acb, acc, acd, blen):
"""Estimate D(A, B; C, D) and standard error using the block-jackknife.
Parameters
----------
aca : array_like, int, shape (n_variants, 2),
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
blen : int
Block size (number of variants).
Returns
-------
d : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
z : float
Z-score (number of standard errors from zero).
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
Notes
-----
See Patterson (2012), main text and Appendix A.
See Also
--------
allel.stats.admixture.patterson_d
"""
# calculate per-variant values
num, den = patterson_d(aca, acb, acc, acd)
# N.B., nans can occur if any of the populations have completely missing
# genotype calls at a variant (i.e., allele number is zero). Here we
# assume that is rare enough to be negligible.
# calculate overall estimate
d_avg = np.nansum(num) / np.nansum(den)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
# compute Z score
z = d_avg / se
return d_avg, se, z, vb, vj | Estimate D(A, B; C, D) and standard error using the block-jackknife.
Parameters
----------
aca : array_like, int, shape (n_variants, 2),
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
blen : int
Block size (number of variants).
Returns
-------
d : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
z : float
Z-score (number of standard errors from zero).
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
Notes
-----
See Patterson (2012), main text and Appendix A.
See Also
--------
allel.stats.admixture.patterson_d | Below is the instruction that describes the task:
### Input:
Estimate D(A, B; C, D) and standard error using the block-jackknife.
Parameters
----------
aca : array_like, int, shape (n_variants, 2),
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
blen : int
Block size (number of variants).
Returns
-------
d : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
z : float
Z-score (number of standard errors from zero).
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
Notes
-----
See Patterson (2012), main text and Appendix A.
See Also
--------
allel.stats.admixture.patterson_d
### Response:
def average_patterson_d(aca, acb, acc, acd, blen):
"""Estimate D(A, B; C, D) and standard error using the block-jackknife.
Parameters
----------
aca : array_like, int, shape (n_variants, 2),
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
blen : int
Block size (number of variants).
Returns
-------
d : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
z : float
Z-score (number of standard errors from zero).
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
Notes
-----
See Patterson (2012), main text and Appendix A.
See Also
--------
allel.stats.admixture.patterson_d
"""
# calculate per-variant values
num, den = patterson_d(aca, acb, acc, acd)
# N.B., nans can occur if any of the populations have completely missing
# genotype calls at a variant (i.e., allele number is zero). Here we
# assume that is rare enough to be negligible.
# calculate overall estimate
d_avg = np.nansum(num) / np.nansum(den)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
# compute Z score
z = d_avg / se
return d_avg, se, z, vb, vj |
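A toy invocation with random allele counts, assuming scikit-allel (which supplies the patterson_d, moving_statistic and jackknife helpers used above) and numpy are installed; a real analysis would derive the counts from genotype data rather than a random generator:
import numpy as np

rng = np.random.default_rng(42)
# 1000 biallelic variants for each of the four populations, counts kept > 0
aca, acb, acc, acd = (rng.integers(1, 20, size=(1000, 2)) for _ in range(4))

d, se, z, vb, vj = average_patterson_d(aca, acb, acc, acd, blen=100)
print(d, se, z)           # overall D, its block-jackknife SE, and the Z-score
assert vb.shape == (10,)  # one value per 100-variant block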
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension in GetHeaderExtensions():
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
if Search(r'\bliterals\b', line):
error(filename, linenum, 'build/namespaces_literals', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
else:
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension in GetHeaderExtensions()
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.') | Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Below is the instruction that describes the task:
### Input:
Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
### Response:
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension in GetHeaderExtensions():
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
if Search(r'\bliterals\b', line):
error(filename, linenum, 'build/namespaces_literals', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
else:
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension in GetHeaderExtensions()
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.') |
def update_properties(self, properties):
"""
Update writeable properties of this NIC.
Authorization requirements:
* Object-access permission to the Partition containing this NIC.
* Object-access permission to the backing Adapter for this NIC.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model - NIC Element Object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
self.manager.session.post(self.uri, body=properties)
is_rename = self.manager._name_prop in properties
if is_rename:
# Delete the old name from the cache
self.manager._name_uri_cache.delete(self.name)
self.properties.update(copy.deepcopy(properties))
if is_rename:
# Add the new name to the cache
self.manager._name_uri_cache.update(self.name, self.uri) | Update writeable properties of this NIC.
Authorization requirements:
* Object-access permission to the Partition containing this NIC.
* Object-access permission to the backing Adapter for this NIC.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model - NIC Element Object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | Below is the instruction that describes the task:
### Input:
Update writeable properties of this NIC.
Authorization requirements:
* Object-access permission to the Partition containing this NIC.
* Object-access permission to the backing Adapter for this NIC.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model - NIC Element Object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
def update_properties(self, properties):
"""
Update writeable properties of this NIC.
Authorization requirements:
* Object-access permission to the Partition containing this NIC.
* Object-access permission to the backing Adapter for this NIC.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model - NIC Element Object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
self.manager.session.post(self.uri, body=properties)
is_rename = self.manager._name_prop in properties
if is_rename:
# Delete the old name from the cache
self.manager._name_uri_cache.delete(self.name)
self.properties.update(copy.deepcopy(properties))
if is_rename:
# Add the new name to the cache
self.manager._name_uri_cache.update(self.name, self.uri) |
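A brief usage sketch of the call above may help; the partition object, NIC name, and property values are illustrative assumptions and not part of the original snippet.

# Hypothetical usage (assumes an open zhmcclient session and a partition whose
# NIC manager exposes find(); names and values are illustrative only).
nic = partition.nics.find(name='nic1')
nic.update_properties({'description': 'backup network interface'})
# A rename goes through the same call; the name-to-URI cache is refreshed as
# shown in the method body above.
nic.update_properties({'name': 'nic1-renamed'})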
def estimation_required(func, *args, **kw):
"""
Decorator checking the self._estimated flag in an Estimator instance, raising a value error if the decorated
function is called before estimator.estimate() has been called.
If mixed with a property-annotation, this annotation needs to come first in the chain of function calls, i.e.,
@property
@estimation_required
def func(self):
....
"""
self = args[0] if len(args) > 0 else None
if self and hasattr(self, '_estimated') and not self._estimated:
raise ValueError("Tried calling %s on %s which requires the estimator to be estimated."
% (func.__name__, self.__class__.__name__))
return func(*args, **kw) | Decorator checking the self._estimated flag in an Estimator instance, raising a value error if the decorated
function is called before estimator.estimate() has been called.
If mixed with a property-annotation, this annotation needs to come first in the chain of function calls, i.e.,
@property
@estimation_required
def func(self):
.... | Below is the instruction that describes the task:
### Input:
Decorator checking the self._estimated flag in an Estimator instance, raising a value error if the decorated
function is called before estimator.estimate() has been called.
If mixed with a property-annotation, this annotation needs to come first in the chain of function calls, i.e.,
@property
@estimation_required
def func(self):
....
### Response:
def estimation_required(func, *args, **kw):
"""
Decorator checking the self._estimated flag in an Estimator instance, raising a value error if the decorated
function is called before estimator.estimate() has been called.
If mixed with a property-annotation, this annotation needs to come first in the chain of function calls, i.e.,
@property
@estimation_required
def func(self):
....
"""
self = args[0] if len(args) > 0 else None
if self and hasattr(self, '_estimated') and not self._estimated:
raise ValueError("Tried calling %s on %s which requires the estimator to be estimated."
% (func.__name__, self.__class__.__name__))
return func(*args, **kw) |
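To make the guard concrete, here is a small self-contained sketch. The original helper receives func together with *args and appears to rely on an external decorator utility; the stand-alone version below re-creates the same check with functools.wraps purely for illustration, and DummyEstimator is an assumed toy class.

import functools

def estimation_required_sketch(func):
    # Same guard as above, written as a plain decorator so it runs standalone.
    @functools.wraps(func)
    def wrapper(self, *args, **kw):
        if hasattr(self, '_estimated') and not self._estimated:
            raise ValueError("Tried calling %s on %s which requires the estimator to be estimated."
                             % (func.__name__, self.__class__.__name__))
        return func(self, *args, **kw)
    return wrapper

class DummyEstimator:
    def __init__(self):
        self._estimated = False
    def estimate(self):
        self._estimated = True
    @property
    @estimation_required_sketch
    def result(self):
        return 42

e = DummyEstimator()
try:
    e.result                # raises ValueError: not yet estimated
except ValueError as exc:
    print(exc)
e.estimate()
print(e.result)             # 42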
def total_energy_matrix(self):
"""
The total energy matrix. Each matrix element (i, j) corresponds to the
total interaction energy between site i and site j.
Note that this does not include the charged-cell energy, which is only important
when the simulation cell is not charge balanced.
"""
totalenergy = self._recip + self._real
for i in range(len(self._point)):
totalenergy[i, i] += self._point[i]
return totalenergy | The total energy matrix. Each matrix element (i, j) corresponds to the
total interaction energy between site i and site j.
Note that this does not include the charged-cell energy, which is only important
when the simulation cell is not charge balanced. | Below is the instruction that describes the task:
### Input:
The total energy matrix. Each matrix element (i, j) corresponds to the
total interaction energy between site i and site j.
Note that this does not include the charged-cell energy, which is only important
when the simulation cell is not charge balanced.
### Response:
def total_energy_matrix(self):
"""
The total energy matrix. Each matrix element (i, j) corresponds to the
total interaction energy between site i and site j.
Note that this does not include the charged-cell energy, which is only important
when the simulation cell is not charge balanced.
"""
totalenergy = self._recip + self._real
for i in range(len(self._point)):
totalenergy[i, i] += self._point[i]
return totalenergy |
def reward(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> tf.Tensor:
'''Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function.
'''
scope = self.reward_scope(state, action, next_state)
r = self.compile_reward(scope).tensor
with self.graph.as_default():
with tf.name_scope('reward'):
return tf.expand_dims(r, -1) | Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function. | Below is the instruction that describes the task:
### Input:
Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function.
### Response:
def reward(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> tf.Tensor:
'''Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function.
'''
scope = self.reward_scope(state, action, next_state)
r = self.compile_reward(scope).tensor
with self.graph.as_default():
with tf.name_scope('reward'):
return tf.expand_dims(r, -1) |
def detach(gandi, resource, background, force):
"""Detach an ip from it's currently attached vm.
resource can be an ip id or ip.
"""
if not force:
proceed = click.confirm('Are you sure you want to detach ip %s?' %
resource)
if not proceed:
return
return gandi.ip.detach(resource, background, force) | Detach an ip from its currently attached vm.
resource can be an ip id or ip. | Below is the instruction that describes the task:
### Input:
Detach an ip from its currently attached vm.
resource can be an ip id or ip.
### Response:
def detach(gandi, resource, background, force):
"""Detach an ip from it's currently attached vm.
resource can be an ip id or ip.
"""
if not force:
proceed = click.confirm('Are you sure you want to detach ip %s?' %
resource)
if not proceed:
return
return gandi.ip.detach(resource, background, force) |
def denoise_grid(self, val, expand=1):
"""
for every cell in the grid of 'val' fill all cells
around it to denoise the grid
"""
updated_grid = [[self.grd.get_tile(y,x) \
for x in range(self.grd.grid_width)] \
for y in range(self.grd.grid_height)]
for row in range(self.grd.get_grid_height() - expand):
for col in range(self.grd.get_grid_width() - expand):
updated_grid[row][col] = self.grd.get_tile(row,col) # set original point
if self.grd.get_tile(row,col) == val:
for y in range(-expand, expand):
for x in range(-expand, expand):
new_x = col+x
new_y = row+y
if new_x < 0: new_x = 0
if new_y < 0: new_y = 0
if new_x > self.grd.get_grid_width() - 1: new_x = self.grd.get_grid_width() - 1
if new_y > self.grd.get_grid_height() - 1: new_y = self.grd.get_grid_height() - 1
# randomly NOT denoise to make interesting edges
if expand > 0:
if randint(1,expand * 2) > (expand+1):
updated_grid[new_y][new_x] = val
else:
updated_grid[new_y][new_x] = val
self.grd.replace_grid(updated_grid) | for every cell in the grid of 'val' fill all cells
around it to denoise the grid | Below is the instruction that describes the task:
### Input:
for every cell in the grid of 'val' fill all cells
around it to denoise the grid
### Response:
def denoise_grid(self, val, expand=1):
"""
for every cell in the grid of 'val' fill all cells
around it to denoise the grid
"""
updated_grid = [[self.grd.get_tile(y,x) \
for x in range(self.grd.grid_width)] \
for y in range(self.grd.grid_height)]
for row in range(self.grd.get_grid_height() - expand):
for col in range(self.grd.get_grid_width() - expand):
updated_grid[row][col] = self.grd.get_tile(row,col) # set original point
if self.grd.get_tile(row,col) == val:
for y in range(-expand, expand):
for x in range(-expand, expand):
new_x = col+x
new_y = row+y
if new_x < 0: new_x = 0
if new_y < 0: new_y = 0
if new_x > self.grd.get_grid_width() - 1: new_x = self.grd.get_grid_width() - 1
if new_y > self.grd.get_grid_height() - 1: new_y = self.grd.get_grid_height() - 1
# randomly NOT denoise to make interesting edges
if expand > 0:
if randint(1,expand * 2) > (expand+1):
updated_grid[new_y][new_x] = val
else:
updated_grid[new_y][new_x] = val
self.grd.replace_grid(updated_grid) |
def res_set_to_phenotype(res_set, full_list):
"""
Converts a set of strings indicating resources to a binary string where
the positions of 1s indicate which resources are present.
Inputs: res_set - a set of strings indicating which resources are present
full_list - a list of strings indicating all resources which
could be present, and the order in which they should
map to bits in the phenotype
returns: A binary string
"""
full_list = list(full_list)
phenotype = len(full_list) * ["0"]
for i in range(len(full_list)):
if full_list[i] in res_set:
phenotype[i] = "1"
assert(phenotype.count("1") == len(res_set))
# Remove unnecessary leading 0s
while phenotype[0] == "0" and len(phenotype) > 1:
phenotype = phenotype[1:]
return "0b"+"".join(phenotype) | Converts a set of strings indicating resources to a binary string where
the positions of 1s indicate which resources are present.
Inputs: res_set - a set of strings indicating which resources are present
full_list - a list of strings indicating all resources which
could be present, and the order in which they should
map to bits in the phenotype
returns: A binary string | Below is the instruction that describes the task:
### Input:
Converts a set of strings indicating resources to a binary string where
the positions of 1s indicate which resources are present.
Inputs: res_set - a set of strings indicating which resources are present
full_list - a list of strings indicating all resources which
could be present, and the order in which they should
map to bits in the phenotype
returns: A binary string
### Response:
def res_set_to_phenotype(res_set, full_list):
"""
Converts a set of strings indicating resources to a binary string where
the positions of 1s indicate which resources are present.
Inputs: res_set - a set of strings indicating which resources are present
full_list - a list of strings indicating all resources which
could be present, and the order in which they should
map to bits in the phenotype
returns: A binary string
"""
full_list = list(full_list)
phenotype = len(full_list) * ["0"]
for i in range(len(full_list)):
if full_list[i] in res_set:
phenotype[i] = "1"
assert(phenotype.count("1") == len(res_set))
# Remove unnecessary leading 0s
while phenotype[0] == "0" and len(phenotype) > 1:
phenotype = phenotype[1:]
return "0b"+"".join(phenotype) |
def callback(newstate):
"""Callback from modem, process based on new state"""
print('callback: ', newstate)
if newstate == modem.STATE_RING:
if state == modem.STATE_IDLE:
att = {"cid_time": modem.get_cidtime,
"cid_number": modem.get_cidnumber,
"cid_name": modem.get_cidname}
print('Ringing', att)
elif newstate == modem.STATE_CALLERID:
att = {"cid_time": modem.get_cidtime,
"cid_number": modem.get_cidnumber,
"cid_name": modem.get_cidname}
print('CallerID', att)
elif newstate == modem.STATE_IDLE:
print('idle')
return | Callback from modem, process based on new state | Below is the instruction that describes the task:
### Input:
Callback from modem, process based on new state
### Response:
def callback(newstate):
"""Callback from modem, process based on new state"""
print('callback: ', newstate)
if newstate == modem.STATE_RING:
if state == modem.STATE_IDLE:
att = {"cid_time": modem.get_cidtime,
"cid_number": modem.get_cidnumber,
"cid_name": modem.get_cidname}
print('Ringing', att)
elif newstate == modem.STATE_CALLERID:
att = {"cid_time": modem.get_cidtime,
"cid_number": modem.get_cidnumber,
"cid_name": modem.get_cidname}
print('CallerID', att)
elif newstate == modem.STATE_IDLE:
print('idle')
return |
def p_importIdentifiers(self, p):
"""importIdentifiers : importIdentifiers ',' importIdentifier
| importIdentifier"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]] | importIdentifiers : importIdentifiers ',' importIdentifier
| importIdentifier | Below is the instruction that describes the task:
### Input:
importIdentifiers : importIdentifiers ',' importIdentifier
| importIdentifier
### Response:
def p_importIdentifiers(self, p):
"""importIdentifiers : importIdentifiers ',' importIdentifier
| importIdentifier"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]] |
def str_to_ipmask(ipmask):
'''
Converts a string with the notation ip/mask (e.g. 192.168.1.1/24 or 192.168.1.1/255.255.255.0) to a hex mask
'''
v = ipmask.split("/")
if len(v) > 2: raise Exception("bad mask format")
mask_ip = ip2hex(v[0])
if mask_ip is None: raise Exception("bad mask format")
mask = v[1] if len(v) == 2 else 32
try:
mask = (0xffffffff00000000 >> int(mask)) & 0xffffffff
except:
mask = ip2hex(v[1])
if mask is None: raise Exception("bad mask format")
return mask_ip, mask | Converts a string with the notation ip/mask (e.g. 192.168.1.1/24 or 192.168.1.1/255.255.255.0) to a hex mask | Below is the instruction that describes the task:
### Input:
Converts a string with the notation ip/mask (e.g. 192.168.1.1/24 or 192.168.1.1/255.255.255.0) to a hex mask
### Response:
def str_to_ipmask(ipmask):
'''
Converts a string with the notation ip/mask (e.g. 192.168.1.1/24 or 192.168.1.1/255.255.255.0) to a hex mask
'''
v = ipmask.split("/")
if len(v) > 2: raise Exception("bad mask format")
mask_ip = ip2hex(v[0])
if mask_ip is None: raise Exception("bad mask format")
mask = v[1] if len(v) == 2 else 32
try:
mask = (0xffffffff00000000 >> int(mask)) & 0xffffffff
except:
mask = ip2hex(v[1])
if mask is None: raise Exception("bad mask format")
return mask_ip, mask |
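The ip2hex helper is not shown in this snippet, so the sketch below supplies a plausible stand-in (assumption: it maps a dotted-quad string to a 32-bit integer and returns None on malformed input) purely to make the example runnable.

def ip2hex(ip):
    # Stand-in for the helper referenced above; illustrative only.
    parts = ip.split(".")
    try:
        octets = [int(p) for p in parts]
    except ValueError:
        return None
    if len(octets) != 4 or any(o < 0 or o > 255 for o in octets):
        return None
    return (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3]

ip, mask = str_to_ipmask("192.168.1.1/24")
print(hex(ip), hex(mask))                        # 0xc0a80101 0xffffff00
ip, mask = str_to_ipmask("10.0.0.5/255.255.0.0")
print(hex(mask))                                 # 0xffff0000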
def plot_one_track(file_struct, est_times, est_labels, boundaries_id, labels_id,
title=None):
"""Plots the results of one track, with ground truth if it exists."""
import matplotlib.pyplot as plt
# Set up the boundaries id
bid_lid = boundaries_id
if labels_id is not None:
bid_lid += " + " + labels_id
try:
# Read file
jam = jams.load(file_struct.ref_file)
ann = jam.search(namespace='segment_.*')[0]
ref_inters, ref_labels = ann.to_interval_values()
# To times
ref_times = utils.intervals_to_times(ref_inters)
all_boundaries = [ref_times, est_times]
all_labels = [ref_labels, est_labels]
algo_ids = ["GT", bid_lid]
except:
logging.warning("No references found in %s. Not plotting groundtruth"
% file_struct.ref_file)
all_boundaries = [est_times]
all_labels = [est_labels]
algo_ids = [bid_lid]
N = len(all_boundaries)
# Index the labels to normalize them
for i, labels in enumerate(all_labels):
all_labels[i] = mir_eval.util.index_labels(labels)[0]
# Get color map
cm = plt.get_cmap('gist_rainbow')
max_label = max(max(labels) for labels in all_labels)
figsize = (8, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, boundaries in enumerate(all_boundaries):
color = "b"
if i == 0:
color = "g"
for b in boundaries:
plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
if labels_id is not None:
labels = all_labels[i]
inters = utils.times_to_intervals(boundaries)
for label, inter in zip(labels, inters):
plt.axvspan(inter[0], inter[1], ymin=i / float(N),
ymax=(i + 1) / float(N), alpha=0.6,
color=cm(label / float(max_label)))
plt.axhline(i / float(N), color="k", linewidth=1)
# Format plot
_plot_formatting(title, os.path.basename(file_struct.audio_file), algo_ids,
all_boundaries[0][-1], N, None) | Plots the results of one track, with ground truth if it exists. | Below is the instruction that describes the task:
### Input:
Plots the results of one track, with ground truth if it exists.
### Response:
def plot_one_track(file_struct, est_times, est_labels, boundaries_id, labels_id,
title=None):
"""Plots the results of one track, with ground truth if it exists."""
import matplotlib.pyplot as plt
# Set up the boundaries id
bid_lid = boundaries_id
if labels_id is not None:
bid_lid += " + " + labels_id
try:
# Read file
jam = jams.load(file_struct.ref_file)
ann = jam.search(namespace='segment_.*')[0]
ref_inters, ref_labels = ann.to_interval_values()
# To times
ref_times = utils.intervals_to_times(ref_inters)
all_boundaries = [ref_times, est_times]
all_labels = [ref_labels, est_labels]
algo_ids = ["GT", bid_lid]
except:
logging.warning("No references found in %s. Not plotting groundtruth"
% file_struct.ref_file)
all_boundaries = [est_times]
all_labels = [est_labels]
algo_ids = [bid_lid]
N = len(all_boundaries)
# Index the labels to normalize them
for i, labels in enumerate(all_labels):
all_labels[i] = mir_eval.util.index_labels(labels)[0]
# Get color map
cm = plt.get_cmap('gist_rainbow')
max_label = max(max(labels) for labels in all_labels)
figsize = (8, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, boundaries in enumerate(all_boundaries):
color = "b"
if i == 0:
color = "g"
for b in boundaries:
plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
if labels_id is not None:
labels = all_labels[i]
inters = utils.times_to_intervals(boundaries)
for label, inter in zip(labels, inters):
plt.axvspan(inter[0], inter[1], ymin=i / float(N),
ymax=(i + 1) / float(N), alpha=0.6,
color=cm(label / float(max_label)))
plt.axhline(i / float(N), color="k", linewidth=1)
# Format plot
_plot_formatting(title, os.path.basename(file_struct.audio_file), algo_ids,
all_boundaries[0][-1], N, None) |
def symbol_leading_char(self):
"""Return the symbol leading char attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.SYMBOL_LEADING_CHAR) | Return the symbol leading char attribute of the BFD file being
processed. | Below is the instruction that describes the task:
### Input:
Return the symbol leading char attribute of the BFD file being
processed.
### Response:
def symbol_leading_char(self):
"""Return the symbol leading char attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.SYMBOL_LEADING_CHAR) |
def _get_tables(self, ods):
"""Returns list of table nodes from ods object"""
childnodes = ods.spreadsheet.childNodes
qname_childnodes = [(s.qname[1], s) for s in childnodes]
return [node for name, node in qname_childnodes if name == u"table"] | Returns list of table nodes from ods object | Below is the instruction that describes the task:
### Input:
Returns list of table nodes from ods object
### Response:
def _get_tables(self, ods):
"""Returns list of table nodes from ods object"""
childnodes = ods.spreadsheet.childNodes
qname_childnodes = [(s.qname[1], s) for s in childnodes]
return [node for name, node in qname_childnodes if name == u"table"] |
def set_state(self, state=None, **kwargs):
""" Set the view state of the camera
Should be a dict (or kwargs) as returned by get_state. It can
be an incomplete dict, in which case only the specified
properties are set.
Parameters
----------
state : dict
The camera state.
**kwargs : dict
Unused keyword arguments.
"""
D = state or {}
D.update(kwargs)
for key, val in D.items():
if key not in self._state_props:
raise KeyError('Not a valid camera state property %r' % key)
setattr(self, key, val) | Set the view state of the camera
Should be a dict (or kwargs) as returned by get_state. It can
be an incomplete dict, in which case only the specified
properties are set.
Parameters
----------
state : dict
The camera state.
**kwargs : dict
Unused keyword arguments. | Below is the instruction that describes the task:
### Input:
Set the view state of the camera
Should be a dict (or kwargs) as returned by get_state. It can
be an incomplete dict, in which case only the specified
properties are set.
Parameters
----------
state : dict
The camera state.
**kwargs : dict
Unused keyword arguments.
### Response:
def set_state(self, state=None, **kwargs):
""" Set the view state of the camera
Should be a dict (or kwargs) as returned by get_state. It can
be an incomplete dict, in which case only the specified
properties are set.
Parameters
----------
state : dict
The camera state.
**kwargs : dict
Unused keyword arguments.
"""
D = state or {}
D.update(kwargs)
for key, val in D.items():
if key not in self._state_props:
raise KeyError('Not a valid camera state property %r' % key)
setattr(self, key, val) |
def _evaluate_objective_multiple(objective_function, arg_batch,
batch_evaluate_objective):
"""Evaluates the objective function on a batch of points.
If `batch_evaluate_objective` is True, returns
`objective function(arg_batch)` else it maps the `objective_function`
across the `arg_batch`.
Args:
objective_function: A Python callable that accepts a single `Tensor` of
rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype
containing the value of the function at that point. If
`batch_evaluate_objective` is True, it should accept a `Tensor` of shape `[batch_size] + s` where `batch_size` is the
size of the batch of args. In this case, the expected return value is a
`Tensor` of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
num_evaluations: An `int32` scalar `Tensor`containing the number of
points on which the objective function was evaluated (i.e `batch_size`).
"""
n_points = tf.shape(input=arg_batch)[0]
if batch_evaluate_objective:
return objective_function(arg_batch), n_points
return tf.map_fn(objective_function, arg_batch), n_points | Evaluates the objective function on a batch of points.
If `batch_evaluate_objective` is True, returns
`objective function(arg_batch)` else it maps the `objective_function`
across the `arg_batch`.
Args:
objective_function: A Python callable that accepts a single `Tensor` of
rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype
containing the value of the function at that point. If
`batch_evaluate_objective` is True, it should accept a `Tensor` of shape `[batch_size] + s` where `batch_size` is the
size of the batch of args. In this case, the expected return value is a
`Tensor` of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
num_evaluations: An `int32` scalar `Tensor`containing the number of
points on which the objective function was evaluated (i.e `batch_size`). | Below is the instruction that describes the task:
### Input:
Evaluates the objective function on a batch of points.
If `batch_evaluate_objective` is True, returns
`objective function(arg_batch)` else it maps the `objective_function`
across the `arg_batch`.
Args:
objective_function: A Python callable that accepts a single `Tensor` of
rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype
containing the value of the function at that point. If
`batch_evaluate_objective` is True, it should accept a `Tensor` of shape `[batch_size] + s` where `batch_size` is the
size of the batch of args. In this case, the expected return value is a
`Tensor` of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
num_evaluations: An `int32` scalar `Tensor`containing the number of
points on which the objective function was evaluated (i.e `batch_size`).
### Response:
def _evaluate_objective_multiple(objective_function, arg_batch,
batch_evaluate_objective):
"""Evaluates the objective function on a batch of points.
If `batch_evaluate_objective` is True, returns
`objective function(arg_batch)` else it maps the `objective_function`
across the `arg_batch`.
Args:
objective_function: A Python callable that accepts a single `Tensor` of
rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype
containing the value of the function at that point. If
`batch_evaluate_objective` is True, it should accept a `Tensor` of shape `[batch_size] + s` where `batch_size` is the
size of the batch of args. In this case, the expected return value is a
`Tensor` of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
num_evaluations: An `int32` scalar `Tensor`containing the number of
points on which the objective function was evaluated (i.e `batch_size`).
"""
n_points = tf.shape(input=arg_batch)[0]
if batch_evaluate_objective:
return objective_function(arg_batch), n_points
return tf.map_fn(objective_function, arg_batch), n_points |
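An illustrative call, assuming TensorFlow 2.x eager execution and the function above in scope; the sum-of-squares objective is a toy stand-in.

import tensorflow as tf

def sum_of_squares(x):
    # Reduces over the last axis, so a [batch_size, d] input yields [batch_size] values.
    return tf.reduce_sum(x * x, axis=-1)

batch = tf.constant([[1.0, 2.0], [3.0, 4.0]])
values, n = _evaluate_objective_multiple(sum_of_squares, batch,
                                         batch_evaluate_objective=True)
# values -> [ 5., 25.], n -> 2
values, n = _evaluate_objective_multiple(sum_of_squares, batch,
                                         batch_evaluate_objective=False)
# Same result here, but tf.map_fn evaluates the objective one point at a time.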
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False,
concat_softmax=False):
"""As described in https://arxiv.org/abs/1511.06581"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("state_value"):
if noisy:
# Apply noisy network on fully connected layers
# ref: https://arxiv.org/abs/1706.10295
state_hidden = noisy_dense(out, name='noisy_fc1', size=512,
activation_fn=tf.nn.relu)
state_score = noisy_dense(state_hidden, name='noisy_fc2',
size=1)
else:
state_hidden = layers.fully_connected(
out,
num_outputs=512,
activation_fn=tf.nn.relu
)
state_score = layers.fully_connected(state_hidden,
num_outputs=1,
activation_fn=None)
with tf.variable_scope("action_value"):
if noisy:
# Apply noisy network on fully connected layers
# ref: https://arxiv.org/abs/1706.10295
actions_hidden = noisy_dense(out, name='noisy_fc1', size=512,
activation_fn=tf.nn.relu)
action_scores = noisy_dense(actions_hidden, name='noisy_fc2',
size=num_actions)
else:
actions_hidden = layers.fully_connected(
out,
num_outputs=512,
activation_fn=tf.nn.relu
)
action_scores = layers.fully_connected(
actions_hidden,
num_outputs=num_actions,
activation_fn=None
)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores = action_scores - tf.expand_dims(
action_scores_mean,
1
)
return state_score + action_scores | As described in https://arxiv.org/abs/1511.06581 | Below is the instruction that describes the task:
### Input:
As described in https://arxiv.org/abs/1511.06581
### Response:
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False,
concat_softmax=False):
"""As described in https://arxiv.org/abs/1511.06581"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("state_value"):
if noisy:
# Apply noisy network on fully connected layers
# ref: https://arxiv.org/abs/1706.10295
state_hidden = noisy_dense(out, name='noisy_fc1', size=512,
activation_fn=tf.nn.relu)
state_score = noisy_dense(state_hidden, name='noisy_fc2',
size=1)
else:
state_hidden = layers.fully_connected(
out,
num_outputs=512,
activation_fn=tf.nn.relu
)
state_score = layers.fully_connected(state_hidden,
num_outputs=1,
activation_fn=None)
with tf.variable_scope("action_value"):
if noisy:
# Apply noisy network on fully connected layers
# ref: https://arxiv.org/abs/1706.10295
actions_hidden = noisy_dense(out, name='noisy_fc1', size=512,
activation_fn=tf.nn.relu)
action_scores = noisy_dense(actions_hidden, name='noisy_fc2',
size=num_actions)
else:
actions_hidden = layers.fully_connected(
out,
num_outputs=512,
activation_fn=tf.nn.relu
)
action_scores = layers.fully_connected(
actions_hidden,
num_outputs=num_actions,
activation_fn=None
)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores = action_scores - tf.expand_dims(
action_scores_mean,
1
)
return state_score + action_scores |
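The last few lines implement the dueling aggregation Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)). A tiny NumPy sketch with toy numbers shows the arithmetic:

import numpy as np

state_score = np.array([[1.0]])               # V(s), shape [batch, 1]
action_scores = np.array([[2.0, 0.0, 1.0]])   # A(s, a), shape [batch, num_actions]
q = state_score + (action_scores - action_scores.mean(axis=1, keepdims=True))
print(q)   # [[2. 0. 1.]] -> the advantage mean (1.0) is subtracted before adding V(s)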
def set_hostname(hostname):
'''
Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
'''
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
return comp.Rename(Name=hostname) | Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname | Below is the instruction that describes the task:
### Input:
Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
### Response:
def set_hostname(hostname):
'''
Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
'''
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
return comp.Rename(Name=hostname) |
def generate_filename(self, instance, filename):
"""
removes UTF chars from filename
"""
from unidecode import unidecode
return super().generate_filename(instance, unidecode(force_text(filename))) | removes UTF chars from filename | Below is the instruction that describes the task:
### Input:
removes UTF chars from filename
### Response:
def generate_filename(self, instance, filename):
"""
removes UTF chars from filename
"""
from unidecode import unidecode
return super().generate_filename(instance, unidecode(force_text(filename))) |
def is_valid_resource_name(rname, exception_type=None):
"""Validates the given resource name to ARM guidelines, individual services may be more restrictive.
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool
"""
match = _ARMNAME_RE.match(rname)
if match:
return True
if exception_type:
raise exception_type()
return False | Validates the given resource name to ARM guidelines, individual services may be more restrictive.
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Validates the given resource name to ARM guidelines, individual services may be more restrictive.
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool
### Response:
def is_valid_resource_name(rname, exception_type=None):
"""Validates the given resource name to ARM guidelines, individual services may be more restrictive.
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool
"""
match = _ARMNAME_RE.match(rname)
if match:
return True
if exception_type:
raise exception_type()
return False |
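A usage sketch follows. The module-level _ARMNAME_RE pattern is not shown in this snippet, so the regex below is a stand-in chosen only to make the example self-contained; the real pattern may differ.

import re

_ARMNAME_RE = re.compile(r'^[-\w\._\(\)]+$')   # assumed shape, illustrative only

print(is_valid_resource_name("my-resource_group.01"))            # True
print(is_valid_resource_name("bad/name"))                        # False
is_valid_resource_name("bad/name", exception_type=ValueError)    # raises ValueError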
def login(self, username=None, password=None):
"""
Logs in the user. If the username and password are correct, the server returns the user's sessionToken.
"""
if username:
self.set('username', username)
if password:
self.set('password', password)
response = client.post('/login', params=self.dump())
content = response.json()
self._update_data(content)
self._handle_save_result(True)
if 'smsCode' not in content:
self._attributes.pop('smsCode', None) | Logs in the user. If the username and password are correct, the server returns the user's sessionToken. | Below is the instruction that describes the task:
### Input:
Logs in the user. If the username and password are correct, the server returns the user's sessionToken.
### Response:
def login(self, username=None, password=None):
"""
Logs in the user. If the username and password are correct, the server returns the user's sessionToken.
"""
if username:
self.set('username', username)
if password:
self.set('password', password)
response = client.post('/login', params=self.dump())
content = response.json()
self._update_data(content)
self._handle_save_result(True)
if 'smsCode' not in content:
self._attributes.pop('smsCode', None) |
def _map_to_memory(self, stride=1):
r"""Maps results to memory. Will be stored in attribute :attr:`_Y`."""
self._mapping_to_mem_active = True
try:
self._Y = self.get_output(stride=stride)
from pyemma.coordinates.data import DataInMemory
self._Y_source = DataInMemory(self._Y)
finally:
self._mapping_to_mem_active = False
self._in_memory = True | r"""Maps results to memory. Will be stored in attribute :attr:`_Y`. | Below is the instruction that describes the task:
### Input:
r"""Maps results to memory. Will be stored in attribute :attr:`_Y`.
### Response:
def _map_to_memory(self, stride=1):
r"""Maps results to memory. Will be stored in attribute :attr:`_Y`."""
self._mapping_to_mem_active = True
try:
self._Y = self.get_output(stride=stride)
from pyemma.coordinates.data import DataInMemory
self._Y_source = DataInMemory(self._Y)
finally:
self._mapping_to_mem_active = False
self._in_memory = True |
def _emp_extra_options(options):
"""
Get special options patch, cols, and splits if analysis in emp module
"""
# Check that metadata is valid
metadata_path = os.path.normpath(os.path.join(options['param_dir'],
options['metadata']))
if not os.path.isfile(metadata_path):
raise IOError, ("Path to metadata file %s is invalid." %
metadata_path)
options['metadata_path'] = metadata_path
# Using subset if given, create and store patch
subset = options.get('subset', '')
options['patch'] = emp.Patch(metadata_path, subset)
# If cols or splits not given in options, make empty strings
if 'cols' not in options.keys():
options['cols'] = ''
if 'splits' not in options.keys():
options['splits'] = ''
return options | Get special options patch, cols, and splits if analysis in emp module | Below is the instruction that describes the task:
### Input:
Get special options patch, cols, and splits if analysis in emp module
### Response:
def _emp_extra_options(options):
"""
Get special options patch, cols, and splits if analysis in emp module
"""
# Check that metadata is valid
metadata_path = os.path.normpath(os.path.join(options['param_dir'],
options['metadata']))
if not os.path.isfile(metadata_path):
raise IOError, ("Path to metadata file %s is invalid." %
metadata_path)
options['metadata_path'] = metadata_path
# Using subset if given, create and store patch
subset = options.get('subset', '')
options['patch'] = emp.Patch(metadata_path, subset)
# If cols or splits not given in options, make empty strings
if 'cols' not in options.keys():
options['cols'] = ''
if 'splits' not in options.keys():
options['splits'] = ''
return options |
def on_train_begin(self, **kwargs: Any) -> None:
"Prepare file with metric names."
self.path.parent.mkdir(parents=True, exist_ok=True)
self.file = self.path.open('a') if self.append else self.path.open('w')
self.file.write(','.join(self.learn.recorder.names[:(None if self.add_time else -1)]) + '\n') | Prepare file with metric names. | Below is the instruction that describes the task:
### Input:
Prepare file with metric names.
### Response:
def on_train_begin(self, **kwargs: Any) -> None:
"Prepare file with metric names."
self.path.parent.mkdir(parents=True, exist_ok=True)
self.file = self.path.open('a') if self.append else self.path.open('w')
self.file.write(','.join(self.learn.recorder.names[:(None if self.add_time else -1)]) + '\n') |
def filter_backends(backends, filters=None, **kwargs):
"""Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions.
"""
def _match_all(obj, criteria):
"""Return True if all items in criteria matches items in obj."""
return all(getattr(obj, key_, None) == value_ for
key_, value_ in criteria.items())
# Inspect the backends to decide which filters belong to
# backend.configuration and which ones to backend.status, as it does
# not involve querying the API.
configuration_filters = {}
status_filters = {}
for key, value in kwargs.items():
if all(key in backend.configuration() for backend in backends):
configuration_filters[key] = value
else:
status_filters[key] = value
# 1. Apply backend.configuration filtering.
if configuration_filters:
backends = [b for b in backends if
_match_all(b.configuration(), configuration_filters)]
# 2. Apply backend.status filtering (it involves one API call for
# each backend).
if status_filters:
backends = [b for b in backends if
_match_all(b.status(), status_filters)]
# 3. Apply acceptor filter.
backends = list(filter(filters, backends))
return backends | Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions. | Below is the instruction that describes the task:
### Input:
Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions.
### Response:
def filter_backends(backends, filters=None, **kwargs):
"""Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions.
"""
def _match_all(obj, criteria):
"""Return True if all items in criteria matches items in obj."""
return all(getattr(obj, key_, None) == value_ for
key_, value_ in criteria.items())
# Inspect the backends to decide which filters belong to
# backend.configuration and which ones to backend.status, as it does
# not involve querying the API.
configuration_filters = {}
status_filters = {}
for key, value in kwargs.items():
if all(key in backend.configuration() for backend in backends):
configuration_filters[key] = value
else:
status_filters[key] = value
# 1. Apply backend.configuration filtering.
if configuration_filters:
backends = [b for b in backends if
_match_all(b.configuration(), configuration_filters)]
# 2. Apply backend.status filtering (it involves one API call for
# each backend).
if status_filters:
backends = [b for b in backends if
_match_all(b.status(), status_filters)]
# 3. Apply acceptor filter.
backends = list(filter(filters, backends))
return backends |
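A hedged usage sketch, assuming a provider object exposing backends(); the criteria names are illustrative configuration/status fields and may not exist on every backend.

backends = provider.backends()
simulators = filter_backends(backends, simulator=True)
idle_devices = filter_backends(backends, operational=True, pending_jobs=0)
large = filter_backends(backends, filters=lambda b: b.configuration().n_qubits >= 5)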
def read_namespaced_pod_disruption_budget(self, name, namespace, **kwargs):
"""
read the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)
return data | read the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
read the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread.
### Response:
def read_namespaced_pod_disruption_budget(self, name, namespace, **kwargs):
"""
read the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)
return data |
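A usage sketch, assuming kubeconfig access to a cluster and a PodDisruptionBudget named 'my-pdb' in the 'default' namespace (both hypothetical).

from kubernetes import client, config

config.load_kube_config()
policy_api = client.PolicyV1beta1Api()
pdb = policy_api.read_namespaced_pod_disruption_budget("my-pdb", "default")
print(pdb.metadata.name, pdb.spec.min_available)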
def downsync(section, map_files):
"""
For each section defined in the local config file, creates a folder inside the local config folder
named after the section. Downloads the environment file defined by the S3CONF variable for this section
to this folder.
"""
try:
settings = config.Settings(section=section)
storage = STORAGES['s3'](settings=settings)
conf = s3conf.S3Conf(storage=storage, settings=settings)
local_root = os.path.join(config.LOCAL_CONFIG_FOLDER, section)
conf.downsync(local_root, map_files=map_files)
except exceptions.EnvfilePathNotDefinedError:
raise exceptions.EnvfilePathNotDefinedUsageError() | For each section defined in the local config file, creates a folder inside the local config folder
named after the section. Downloads the environment file defined by the S3CONF variable for this section
to this folder. | Below is the instruction that describes the task:
### Input:
For each section defined in the local config file, creates a folder inside the local config folder
named after the section. Downloads the environment file defined by the S3CONF variable for this section
to this folder.
### Response:
def downsync(section, map_files):
"""
For each section defined in the local config file, creates a folder inside the local config folder
named after the section. Downloads the environment file defined by the S3CONF variable for this section
to this folder.
"""
try:
settings = config.Settings(section=section)
storage = STORAGES['s3'](settings=settings)
conf = s3conf.S3Conf(storage=storage, settings=settings)
local_root = os.path.join(config.LOCAL_CONFIG_FOLDER, section)
conf.downsync(local_root, map_files=map_files)
except exceptions.EnvfilePathNotDefinedError:
raise exceptions.EnvfilePathNotDefinedUsageError() |
def get_templates(model):
""" Return a list of templates usable by a model. """
for template_name, template in templates.items():
if issubclass(template.model, model):
yield (template_name, template.layout._meta.verbose_name) | Return a list of templates usable by a model. | Below is the instruction that describes the task:
### Input:
Return a list of templates usable by a model.
### Response:
def get_templates(model):
""" Return a list of templates usable by a model. """
for template_name, template in templates.items():
if issubclass(template.model, model):
yield (template_name, template.layout._meta.verbose_name) |
def to_tex(self, text_size='large', table_width=5, clear_pages = False):
"""
Write the program information to a .tex file, which can be
rendered to .pdf running pdflatex. The program can then be
printed and brought to the gym.
Parameters
----------
text_size
The tex text size, e.g. '\small', 'normalsize', 'large', 'Large'
or 'LARGE'.
table_width
The table width of the .tex code.
Returns
-------
string
Program as tex.
"""
# If rendered, find the length of the longest '6 x 75kg'-type string
max_ex_scheme = 0
if self._rendered:
for (week, day, dynamic_ex) in self._yield_week_day_dynamic():
lengths = [len(s) for s in
self._rendered[week][day][dynamic_ex]['strings']]
max_ex_scheme = max(max_ex_scheme, max(lengths))
env = self.jinja2_environment
template = env.get_template(self.TEMPLATE_NAMES['tex'])
return template.render(program=self, text_size=text_size,
table_width=table_width, clear_pages = clear_pages) | Write the program information to a .tex file, which can be
rendered to .pdf running pdflatex. The program can then be
printed and brought to the gym.
Parameters
----------
text_size
The tex text size, e.g. '\small', 'normalsize', 'large', 'Large'
or 'LARGE'.
table_width
The table width of the .tex code.
Returns
-------
string
Program as tex. | Below is the instruction that describes the task:
### Input:
Write the program information to a .tex file, which can be
rendered to .pdf running pdflatex. The program can then be
printed and brought to the gym.
Parameters
----------
text_size
The tex text size, e.g. '\small', 'normalsize', 'large', 'Large'
or 'LARGE'.
table_width
The table width of the .tex code.
Returns
-------
string
Program as tex.
### Response:
def to_tex(self, text_size='large', table_width=5, clear_pages = False):
"""
Write the program information to a .tex file, which can be
rendered to .pdf running pdflatex. The program can then be
printed and brought to the gym.
Parameters
----------
text_size
The tex text size, e.g. '\small', 'normalsize', 'large', 'Large'
or 'LARGE'.
table_width
The table width of the .tex code.
Returns
-------
string
Program as tex.
"""
# If rendered, find the length of the longest '6 x 75kg'-type string
max_ex_scheme = 0
if self._rendered:
for (week, day, dynamic_ex) in self._yield_week_day_dynamic():
lengths = [len(s) for s in
self._rendered[week][day][dynamic_ex]['strings']]
max_ex_scheme = max(max_ex_scheme, max(lengths))
env = self.jinja2_environment
template = env.get_template(self.TEMPLATE_NAMES['tex'])
return template.render(program=self, text_size=text_size,
table_width=table_width, clear_pages = clear_pages) |
def install(apk, opts=[]):
"""
Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_INSTALL, _convert_opts(opts), apk]
return _exec_command(adb_full_cmd) | Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution | Below is the instruction that describes the task:
### Input:
Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
### Response:
def install(apk, opts=[]):
"""
Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_INSTALL, _convert_opts(opts), apk]
return _exec_command(adb_full_cmd) |
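A usage sketch; the path is hypothetical, and a connected device plus a reachable adb binary are assumed.

result = install("/tmp/example-app.apk", opts=["-r"])   # "-r" asks adb to reinstall, keeping app data
print(result)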
def paintEvent(self, event):
"""
Reimplements the :meth:`*.paintEvent` method.
:param event: QEvent.
:type event: QEvent
"""
super(type(self), self).paintEvent(event)
show_message = True
model = self.model()
if issubclass(type(model), GraphModel):
if model.has_nodes():
show_message = False
elif issubclass(type(model), QAbstractItemModel) or \
issubclass(type(model), QAbstractListModel) or \
issubclass(type(model), QAbstractTableModel):
if model.rowCount():
show_message = False
if show_message:
self.__notifier.show_message(self.__message, 0)
else:
self.__notifier.hide_message() | Reimplements the :meth:`*.paintEvent` method.
:param event: QEvent.
:type event: QEvent | Below is the instruction that describes the task:
### Input:
Reimplements the :meth:`*.paintEvent` method.
:param event: QEvent.
:type event: QEvent
### Response:
def paintEvent(self, event):
"""
Reimplements the :meth:`*.paintEvent` method.
:param event: QEvent.
:type event: QEvent
"""
super(type(self), self).paintEvent(event)
show_message = True
model = self.model()
if issubclass(type(model), GraphModel):
if model.has_nodes():
show_message = False
elif issubclass(type(model), QAbstractItemModel) or \
issubclass(type(model), QAbstractListModel) or \
issubclass(type(model), QAbstractTableModel):
if model.rowCount():
show_message = False
if show_message:
self.__notifier.show_message(self.__message, 0)
else:
self.__notifier.hide_message() |
def create_exception(error_codec):
"""
Creates an exception with given error codec.
:param error_codec: (Error Codec), error codec which includes the class name, message and exception trace.
:return: (Exception), the created exception.
"""
if error_codec.error_code in ERROR_CODE_TO_ERROR:
return ERROR_CODE_TO_ERROR[error_codec.error_code](error_codec.message)
stack_trace = "\n".join(
["\tat %s.%s(%s:%s)" % (x.declaring_class, x.method_name, x.file_name, x.line_number) for x in
error_codec.stack_trace])
message = "Got exception from server:\n %s: %s\n %s" % (error_codec.class_name,
error_codec.message,
stack_trace)
return HazelcastError(message) | Creates an exception with given error codec.
:param error_codec: (Error Codec), error codec which includes the class name, message and exception trace.
    :return: (Exception), the created exception. | Below is the instruction that describes the task:
### Input:
Creates an exception with given error codec.
:param error_codec: (Error Codec), error codec which includes the class name, message and exception trace.
:return: (Exception), the created exception.
### Response:
def create_exception(error_codec):
"""
Creates an exception with given error codec.
:param error_codec: (Error Codec), error codec which includes the class name, message and exception trace.
:return: (Exception), the created exception.
"""
if error_codec.error_code in ERROR_CODE_TO_ERROR:
return ERROR_CODE_TO_ERROR[error_codec.error_code](error_codec.message)
stack_trace = "\n".join(
["\tat %s.%s(%s:%s)" % (x.declaring_class, x.method_name, x.file_name, x.line_number) for x in
error_codec.stack_trace])
message = "Got exception from server:\n %s: %s\n %s" % (error_codec.class_name,
error_codec.message,
stack_trace)
return HazelcastError(message) |
def verify_request(self):
"""
Verify LTI request
:raises: LTIException if request validation failed
"""
request = self.lti_kwargs['app'].current_request
if request.method == 'POST':
            # Chalice expects JSON and does not natively support forms data in
            # a post body. The below is copied from the parsing of query
            # strings as implemented in match_route of Chalice local.py
parsed_url = request.raw_body.decode()
parsed_qs = parse_qs(parsed_url, keep_blank_values=True)
            params = {k: v[0] for k, v in parsed_qs.items()}
else:
params = request.query_params
log.debug(params)
log.debug('verify_request?')
try:
# Chalice does not have a url property therefore building it.
protocol = request.headers.get('x-forwarded-proto', 'http')
hostname = request.headers['host']
path = request.context['path']
url = urlunparse((protocol, hostname, path, "", "", ""))
verify_request_common(self._consumers(), url,
request.method, request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
self.session[prop] = params[prop]
# Set logged in session key
self.session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if self.session.get(prop, None):
del self.session[prop]
self.session[LTI_SESSION_KEY] = False
raise | Verify LTI request
        :raises: LTIException if request validation failed | Below is the instruction that describes the task:
### Input:
Verify LTI request
:raises: LTIException if request validation failed
### Response:
def verify_request(self):
"""
Verify LTI request
:raises: LTIException if request validation failed
"""
request = self.lti_kwargs['app'].current_request
if request.method == 'POST':
            # Chalice expects JSON and does not natively support forms data in
            # a post body. The below is copied from the parsing of query
            # strings as implemented in match_route of Chalice local.py
parsed_url = request.raw_body.decode()
parsed_qs = parse_qs(parsed_url, keep_blank_values=True)
            params = {k: v[0] for k, v in parsed_qs.items()}
else:
params = request.query_params
log.debug(params)
log.debug('verify_request?')
try:
# Chalice does not have a url property therefore building it.
protocol = request.headers.get('x-forwarded-proto', 'http')
hostname = request.headers['host']
path = request.context['path']
url = urlunparse((protocol, hostname, path, "", "", ""))
verify_request_common(self._consumers(), url,
request.method, request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
self.session[prop] = params[prop]
# Set logged in session key
self.session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if self.session.get(prop, None):
del self.session[prop]
self.session[LTI_SESSION_KEY] = False
raise |
def threshold_monitor_hidden_threshold_monitor_Memory_limit(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
Memory = ET.SubElement(threshold_monitor, "Memory")
limit = ET.SubElement(Memory, "limit")
limit.text = kwargs.pop('limit')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def threshold_monitor_hidden_threshold_monitor_Memory_limit(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
Memory = ET.SubElement(threshold_monitor, "Memory")
limit = ET.SubElement(Memory, "limit")
limit.text = kwargs.pop('limit')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def find_best_frametype(channel, start, end,
frametype_match=None, allow_tape=True,
connection=None, host=None, port=None):
"""Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
    frametype_match : `str`, optional
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00'
"""
try:
return find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
allow_tape=allow_tape, on_gaps='error',
connection=connection, host=host, port=port)
except RuntimeError: # gaps (or something else went wrong)
ftout = find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
return_all=True, allow_tape=allow_tape,
on_gaps='ignore', connection=connection,
host=host, port=port)
try:
if isinstance(ftout, dict):
return {key: ftout[key][0] for key in ftout}
return ftout[0]
except IndexError:
raise ValueError("Cannot find any valid frametypes for channel(s)") | Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
    frametype_match : `str`, optional
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
    'L1_HOFT_C00' | Below is the instruction that describes the task:
### Input:
Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
    frametype_match : `str`, optional
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00'
### Response:
def find_best_frametype(channel, start, end,
frametype_match=None, allow_tape=True,
connection=None, host=None, port=None):
"""Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
    frametype_match : `str`, optional
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00'
"""
try:
return find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
allow_tape=allow_tape, on_gaps='error',
connection=connection, host=host, port=port)
except RuntimeError: # gaps (or something else went wrong)
ftout = find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
return_all=True, allow_tape=allow_tape,
on_gaps='ignore', connection=connection,
host=host, port=port)
try:
if isinstance(ftout, dict):
return {key: ftout[key][0] for key in ftout}
return ftout[0]
except IndexError:
raise ValueError("Cannot find any valid frametypes for channel(s)") |
def add_files(self, files):
"""Add files and/or folders to transfer.
If :class:`Transfer.compress` attribute is set to ``True``, files
will get packed into a zip file before sending.
:param files: Files or folders to send
:type files: str, list
"""
if isinstance(files, basestring):
files = [files]
zip_file = None
if self.zip_:
zip_filename = self._get_zip_filename()
zip_file = ZipFile(zip_filename, 'w')
for filename in files:
if os.path.isdir(filename):
for dirname, subdirs, filelist in os.walk(filename):
if dirname:
if self.zip_:
zip_file.write(dirname)
for fname in filelist:
filepath = os.path.join(dirname, fname)
if self.zip_:
zip_file.write(filepath)
else:
fmfile = self.get_file_specs(filepath,
keep_folders=True)
if fmfile['totalsize'] > 0:
self._files.append(fmfile)
else:
if self.zip_:
zip_file.write(filename)
else:
fmfile = self.get_file_specs(filename)
self._files.append(fmfile)
if self.zip_:
zip_file.close()
filename = zip_filename
fmfile = self.get_file_specs(filename)
self._files.append(fmfile) | Add files and/or folders to transfer.
If :class:`Transfer.compress` attribute is set to ``True``, files
will get packed into a zip file before sending.
:param files: Files or folders to send
        :type files: str, list | Below is the instruction that describes the task:
### Input:
Add files and/or folders to transfer.
If :class:`Transfer.compress` attribute is set to ``True``, files
will get packed into a zip file before sending.
:param files: Files or folders to send
:type files: str, list
### Response:
def add_files(self, files):
"""Add files and/or folders to transfer.
If :class:`Transfer.compress` attribute is set to ``True``, files
will get packed into a zip file before sending.
:param files: Files or folders to send
:type files: str, list
"""
if isinstance(files, basestring):
files = [files]
zip_file = None
if self.zip_:
zip_filename = self._get_zip_filename()
zip_file = ZipFile(zip_filename, 'w')
for filename in files:
if os.path.isdir(filename):
for dirname, subdirs, filelist in os.walk(filename):
if dirname:
if self.zip_:
zip_file.write(dirname)
for fname in filelist:
filepath = os.path.join(dirname, fname)
if self.zip_:
zip_file.write(filepath)
else:
fmfile = self.get_file_specs(filepath,
keep_folders=True)
if fmfile['totalsize'] > 0:
self._files.append(fmfile)
else:
if self.zip_:
zip_file.write(filename)
else:
fmfile = self.get_file_specs(filename)
self._files.append(fmfile)
if self.zip_:
zip_file.close()
filename = zip_filename
fmfile = self.get_file_specs(filename)
self._files.append(fmfile) |
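A hedged usage sketch; `transfer` stands for an already-constructed instance of the (unshown) class that owns add_files, and the paths are placeholders:

# With self.zip_ enabled the paths below are packed into a single zip archive
# before sending; otherwise each file's specs are appended individually.
transfer.add_files("report.pdf")               # a single path may be passed as a plain string
transfer.add_files(["photos", "notes.txt"])    # folders are walked recursively via os.walk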
def writemessage(self, text):
"""Put data in output queue, rebuild the prompt and entered data"""
# Need to grab the input queue lock to ensure the entered data doesn't change
# before we're done rebuilding it.
# Note that writemessage will eventually call writecooked
self.IQUEUELOCK.acquire()
TelnetHandlerBase.writemessage(self, text)
        self.IQUEUELOCK.release() | Put data in output queue, rebuild the prompt and entered data | Below is the instruction that describes the task:
### Input:
Put data in output queue, rebuild the prompt and entered data
### Response:
def writemessage(self, text):
"""Put data in output queue, rebuild the prompt and entered data"""
# Need to grab the input queue lock to ensure the entered data doesn't change
# before we're done rebuilding it.
# Note that writemessage will eventually call writecooked
self.IQUEUELOCK.acquire()
TelnetHandlerBase.writemessage(self, text)
self.IQUEUELOCK.release() |
def decorate(self, func, limit, ttl, *anoop, **kwnoop):
"""make limit and ttl required"""
        return super(ratelimit, self).decorate(func, limit, ttl, *anoop, **kwnoop) | make limit and ttl required | Below is the instruction that describes the task:
### Input:
make limit and ttl required
### Response:
def decorate(self, func, limit, ttl, *anoop, **kwnoop):
"""make limit and ttl required"""
return super(ratelimit, self).decorate(func, limit, ttl, *anoop, **kwnoop) |
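A hedged sketch of how the decorator might be applied, assuming the (unshown) base class implements the usual decorator-with-arguments protocol; the limit, ttl, and wrapped function are placeholders:

@ratelimit(10, 3600)              # at most 10 calls per 3600-second window, enforced by the base class
def fetch_profile(user_id):
    return backend.get(user_id)   # `backend` is a placeholder for illustration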
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
            text = re.sub(r'\s+', ' ', text)
if text:
self.dirty = True
                self.escpos.text(text) | puts text in the entity. Whitespace and newlines are stripped to single spaces. | Below is the instruction that describes the task:
### Input:
puts text in the entity. Whitespace and newlines are stripped to single spaces.
### Response:
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
            text = re.sub(r'\s+', ' ', text)
if text:
self.dirty = True
self.escpos.text(text) |
def cache_url(url, model_dir=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
if model_dir is None:
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if filename == "model_final.pkl":
# workaround as pre-trained Caffe2 models from Detectron have all the same filename
# so make the full path the filename by replacing / with _
filename = parts.path.replace("/", "_")
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file) and is_main_process():
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename)
if hash_prefix is not None:
hash_prefix = hash_prefix.group(1)
# workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
# which matches the hash PyTorch uses. So we skip the hash matching
# if the hash_prefix is less than 6 characters
if len(hash_prefix) < 6:
hash_prefix = None
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
synchronize()
return cached_file | r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') | Below is the instruction that describes the task:
### Input:
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
### Response:
def cache_url(url, model_dir=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
if model_dir is None:
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if filename == "model_final.pkl":
# workaround as pre-trained Caffe2 models from Detectron have all the same filename
# so make the full path the filename by replacing / with _
filename = parts.path.replace("/", "_")
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file) and is_main_process():
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename)
if hash_prefix is not None:
hash_prefix = hash_prefix.group(1)
# workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
# which matches the hash PyTorch uses. So we skip the hash matching
# if the hash_prefix is less than 6 characters
if len(hash_prefix) < 6:
hash_prefix = None
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
synchronize()
return cached_file |
def unregister_callback(self, callback_url, **kwargs):
"""
Unregister a callback.
Unregisters a callback URL that was previously white-listed with a **Register a
callback** request for use with the asynchronous interface. Once unregistered, the
URL can no longer be used with asynchronous recognition requests.
**See also:** [Unregistering a callback
URL](https://cloud.ibm.com/docs/services/speech-to-text/async.html#unregister).
:param str callback_url: The callback URL that is to be unregistered.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if callback_url is None:
raise ValueError('callback_url must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('speech_to_text', 'V1',
'unregister_callback')
headers.update(sdk_headers)
params = {'callback_url': callback_url}
url = '/v1/unregister_callback'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
accept_json=False)
return response | Unregister a callback.
Unregisters a callback URL that was previously white-listed with a **Register a
callback** request for use with the asynchronous interface. Once unregistered, the
URL can no longer be used with asynchronous recognition requests.
**See also:** [Unregistering a callback
URL](https://cloud.ibm.com/docs/services/speech-to-text/async.html#unregister).
:param str callback_url: The callback URL that is to be unregistered.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse | Below is the instruction that describes the task:
### Input:
Unregister a callback.
Unregisters a callback URL that was previously white-listed with a **Register a
callback** request for use with the asynchronous interface. Once unregistered, the
URL can no longer be used with asynchronous recognition requests.
**See also:** [Unregistering a callback
URL](https://cloud.ibm.com/docs/services/speech-to-text/async.html#unregister).
:param str callback_url: The callback URL that is to be unregistered.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
### Response:
def unregister_callback(self, callback_url, **kwargs):
"""
Unregister a callback.
Unregisters a callback URL that was previously white-listed with a **Register a
callback** request for use with the asynchronous interface. Once unregistered, the
URL can no longer be used with asynchronous recognition requests.
**See also:** [Unregistering a callback
URL](https://cloud.ibm.com/docs/services/speech-to-text/async.html#unregister).
:param str callback_url: The callback URL that is to be unregistered.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if callback_url is None:
raise ValueError('callback_url must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('speech_to_text', 'V1',
'unregister_callback')
headers.update(sdk_headers)
params = {'callback_url': callback_url}
url = '/v1/unregister_callback'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
accept_json=False)
return response |
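A hedged usage sketch; client construction and authentication are omitted because they depend on the SDK version, so only the call itself is shown:

# Assumes `speech_to_text` is an already-authenticated SpeechToTextV1 instance.
response = speech_to_text.unregister_callback('https://example.com/stt_results')
# The returned DetailedResponse carries the HTTP status code; 200 means the
# callback URL was removed from the allowlist.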
def resume(self) -> None:
"""Resume recording after pause.
Thread safe and UI safe."""
with self.__state_lock:
if self.__state == DataChannelBuffer.State.paused:
self.__state = DataChannelBuffer.State.started | Resume recording after pause.
        Thread safe and UI safe. | Below is the instruction that describes the task:
### Input:
Resume recording after pause.
Thread safe and UI safe.
### Response:
def resume(self) -> None:
"""Resume recording after pause.
Thread safe and UI safe."""
with self.__state_lock:
if self.__state == DataChannelBuffer.State.paused:
self.__state = DataChannelBuffer.State.started |
def merge(*projects):
"""
Merge zero or more dictionaries representing projects with the default
project dictionary and return the result
"""
result = {}
for project in projects:
for name, section in (project or {}).items():
if name not in PROJECT_SECTIONS:
raise ValueError(UNKNOWN_SECTION_ERROR % name)
if section is None:
result[name] = type(result[name])()
continue
if name in NOT_MERGEABLE + SPECIAL_CASE:
result[name] = section
continue
if section and not isinstance(section, (dict, str)):
cname = section.__class__.__name__
raise ValueError(SECTION_ISNT_DICT_ERROR % (name, cname))
if name == 'animation':
# Useful hack to allow you to load projects as animations.
adesc = load.load_if_filename(section)
if adesc:
section = adesc.get('animation', {})
section['run'] = adesc.get('run', {})
result_section = result.setdefault(name, {})
section = construct.to_type(section)
for k, v in section.items():
if v is None:
result_section.pop(k, None)
else:
result_section[k] = v
return result | Merge zero or more dictionaries representing projects with the default
    project dictionary and return the result | Below is the instruction that describes the task:
### Input:
Merge zero or more dictionaries representing projects with the default
project dictionary and return the result
### Response:
def merge(*projects):
"""
Merge zero or more dictionaries representing projects with the default
project dictionary and return the result
"""
result = {}
for project in projects:
for name, section in (project or {}).items():
if name not in PROJECT_SECTIONS:
raise ValueError(UNKNOWN_SECTION_ERROR % name)
if section is None:
result[name] = type(result[name])()
continue
if name in NOT_MERGEABLE + SPECIAL_CASE:
result[name] = section
continue
if section and not isinstance(section, (dict, str)):
cname = section.__class__.__name__
raise ValueError(SECTION_ISNT_DICT_ERROR % (name, cname))
if name == 'animation':
# Useful hack to allow you to load projects as animations.
adesc = load.load_if_filename(section)
if adesc:
section = adesc.get('animation', {})
section['run'] = adesc.get('run', {})
result_section = result.setdefault(name, {})
section = construct.to_type(section)
for k, v in section.items():
if v is None:
result_section.pop(k, None)
else:
result_section[k] = v
return result |
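A hedged example of the merge semantics above; 'animation' is used because the function references it explicitly, and the expected result assumes construct.to_type passes plain dicts through unchanged:

base = {'animation': {'fps': 30, 'length': 10}}
override = {'animation': {'fps': 60, 'length': None}}   # a None value deletes that key
merged = merge(base, override)
# expected: merged['animation'] == {'fps': 60}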
def name_resolve(self, name=None, recursive=False,
nocache=False, **kwargs):
"""Gets the value currently published at an IPNS name.
IPNS is a PKI namespace, where names are the hashes of public keys, and
the private key enables publishing new (signed) values. In resolve, the
default value of ``name`` is your own identity public key.
.. code-block:: python
>>> c.name_resolve()
{'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'}
Parameters
----------
name : str
The IPNS name to resolve (defaults to the connected node)
recursive : bool
Resolve until the result is not an IPFS name (default: false)
nocache : bool
Do not use cached entries (default: false)
Returns
-------
dict : The IPFS path the IPNS hash points at
"""
kwargs.setdefault("opts", {"recursive": recursive,
"nocache": nocache})
args = (name,) if name is not None else ()
return self._client.request('/name/resolve', args,
decoder='json', **kwargs) | Gets the value currently published at an IPNS name.
IPNS is a PKI namespace, where names are the hashes of public keys, and
the private key enables publishing new (signed) values. In resolve, the
default value of ``name`` is your own identity public key.
.. code-block:: python
>>> c.name_resolve()
{'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'}
Parameters
----------
name : str
The IPNS name to resolve (defaults to the connected node)
recursive : bool
Resolve until the result is not an IPFS name (default: false)
nocache : bool
Do not use cached entries (default: false)
Returns
-------
        dict : The IPFS path the IPNS hash points at | Below is the instruction that describes the task:
### Input:
Gets the value currently published at an IPNS name.
IPNS is a PKI namespace, where names are the hashes of public keys, and
the private key enables publishing new (signed) values. In resolve, the
default value of ``name`` is your own identity public key.
.. code-block:: python
>>> c.name_resolve()
{'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'}
Parameters
----------
name : str
The IPNS name to resolve (defaults to the connected node)
recursive : bool
Resolve until the result is not an IPFS name (default: false)
nocache : bool
Do not use cached entries (default: false)
Returns
-------
dict : The IPFS path the IPNS hash points at
### Response:
def name_resolve(self, name=None, recursive=False,
nocache=False, **kwargs):
"""Gets the value currently published at an IPNS name.
IPNS is a PKI namespace, where names are the hashes of public keys, and
the private key enables publishing new (signed) values. In resolve, the
default value of ``name`` is your own identity public key.
.. code-block:: python
>>> c.name_resolve()
{'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'}
Parameters
----------
name : str
The IPNS name to resolve (defaults to the connected node)
recursive : bool
Resolve until the result is not an IPFS name (default: false)
nocache : bool
Do not use cached entries (default: false)
Returns
-------
dict : The IPFS path the IPNS hash points at
"""
kwargs.setdefault("opts", {"recursive": recursive,
"nocache": nocache})
args = (name,) if name is not None else ()
return self._client.request('/name/resolve', args,
decoder='json', **kwargs) |
def apply(self):
"""Inherited from tkinter.simpledialog.Dialog"""
user_type = self.rb_choice.get()
if user_type == 'student' or user_type == 'tutor':
            self.result = user_type | Inherited from tkinter.simpledialog.Dialog | Below is the instruction that describes the task:
### Input:
Inherited from tkinter.simpledialog.Dialog
### Response:
def apply(self):
"""Inherited from tkinter.simpledialog.Dialog"""
user_type = self.rb_choice.get()
if user_type == 'student' or user_type == 'tutor':
self.result = user_type |
def bind_key(pymux, variables):
"""
Bind a key sequence.
-n: Not necessary to use the prefix.
"""
key = variables['<key>']
command = variables['<command>']
arguments = variables['<arguments>']
needs_prefix = not variables['-n']
try:
pymux.key_bindings_manager.add_custom_binding(
key, command, arguments, needs_prefix=needs_prefix)
except ValueError:
raise CommandException('Invalid key: %r' % (key, )) | Bind a key sequence.
    -n: Not necessary to use the prefix. | Below is the instruction that describes the task:
### Input:
Bind a key sequence.
-n: Not necessary to use the prefix.
### Response:
def bind_key(pymux, variables):
"""
Bind a key sequence.
-n: Not necessary to use the prefix.
"""
key = variables['<key>']
command = variables['<command>']
arguments = variables['<arguments>']
needs_prefix = not variables['-n']
try:
pymux.key_bindings_manager.add_custom_binding(
key, command, arguments, needs_prefix=needs_prefix)
except ValueError:
raise CommandException('Invalid key: %r' % (key, )) |
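A hedged sketch of the variables mapping the command expects, inferred from the lookups in its body; the key, command, and arguments are placeholders:

variables = {
    '<key>': 'C-b',               # key sequence to bind
    '<command>': 'split-window',  # pymux command to run when the key is pressed
    '<arguments>': ['-h'],
    '-n': False,                  # False means the prefix key is still required
}
bind_key(pymux, variables)        # `pymux` is the running Pymux instance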
def cull_nodes(self, stat, threshold=0.5, comparator=ge):
"""Delete nodes whose stat >= ``threshold`` (default 0.5).
Optional argument ``comparator`` will replace >= as the test
for whether to cull. You can use the name of a stored function.
"""
comparator = self._lookup_comparator(comparator)
dead = [
name for name, node in self.node.items()
if stat in node and comparator(node[stat], threshold)
]
self.remove_nodes_from(dead)
return self | Delete nodes whose stat >= ``threshold`` (default 0.5).
Optional argument ``comparator`` will replace >= as the test
        for whether to cull. You can use the name of a stored function. | Below is the instruction that describes the task:
### Input:
Delete nodes whose stat >= ``threshold`` (default 0.5).
Optional argument ``comparator`` will replace >= as the test
for whether to cull. You can use the name of a stored function.
### Response:
def cull_nodes(self, stat, threshold=0.5, comparator=ge):
"""Delete nodes whose stat >= ``threshold`` (default 0.5).
Optional argument ``comparator`` will replace >= as the test
for whether to cull. You can use the name of a stored function.
"""
comparator = self._lookup_comparator(comparator)
dead = [
name for name, node in self.node.items()
if stat in node and comparator(node[stat], threshold)
]
self.remove_nodes_from(dead)
return self |
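A hedged usage sketch; `graph` stands for an instance of the (unshown) class defining cull_nodes, and the stat names, thresholds, and stored-function name are placeholders:

# Remove every node whose 'contamination' stat is at least 0.75; chaining works
# because the method returns self. 'le' is assumed to be the name of a stored
# comparator function, as the docstring allows.
graph.cull_nodes('contamination', threshold=0.75)
graph.cull_nodes('health', threshold=0.1, comparator='le').cull_nodes('hunger', 0.9)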
def publish(
self, resource_group_name, automation_account_name, runbook_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Publish runbook draft.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param runbook_name: The parameters supplied to the publish runbook
operation.
:type runbook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.automation.models.ErrorResponseException>`
"""
raw_result = self._publish_initial(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
runbook_name=runbook_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'location': 'str',
})
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | Publish runbook draft.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param runbook_name: The parameters supplied to the publish runbook
operation.
:type runbook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
         :class:`ErrorResponseException<azure.mgmt.automation.models.ErrorResponseException>` | Below is the instruction that describes the task:
### Input:
Publish runbook draft.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param runbook_name: The parameters supplied to the publish runbook
operation.
:type runbook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.automation.models.ErrorResponseException>`
### Response:
def publish(
self, resource_group_name, automation_account_name, runbook_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Publish runbook draft.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param runbook_name: The parameters supplied to the publish runbook
operation.
:type runbook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.automation.models.ErrorResponseException>`
"""
raw_result = self._publish_initial(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
runbook_name=runbook_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'location': 'str',
})
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
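A hedged usage sketch of the long-running call above; the client construction and the operations-group attribute name follow the usual Azure SDK layout and are assumptions here:

# Assumes `client` is an authenticated AutomationClient instance.
poller = client.runbook_draft.publish('my-rg', 'my-automation-account', 'my-runbook')
poller.result()   # block until publishing completes; the operation returns None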
def update_refchip_with_shift(chip_wcs, wcslin, fitgeom='rscale',
rot=0.0, scale=1.0, xsh=0.0, ysh=0.0,
fit=None, xrms=None, yrms=None):
""" Compute the matrix for the scale and rotation correction
Parameters
----------
chip_wcs: wcs object
HST of the input image
wcslin: wcs object
Reference WCS from which the offsets/rotations are determined
fitgeom: str
NOT USED
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
"""
# compute the matrix for the scale and rotation correction
if fit is None:
fit = linearfit.buildFitMatrix(rot, scale)
shift = np.asarray([xsh, ysh]) - np.dot(wcslin.wcs.crpix, fit) + wcslin.wcs.crpix
fit = np.linalg.inv(fit).T
cwcs = chip_wcs.deepcopy()
cd_eye = np.eye(chip_wcs.wcs.cd.shape[0], dtype=ndfloat128)
zero_shift = np.zeros(2, dtype=ndfloat128)
naxis1, naxis2 = chip_wcs.pixel_shape
# estimate precision necessary for iterative processes:
maxiter = 100
crpix2corners = np.dstack([i.flatten() for i in np.meshgrid(
[1, naxis1], [1, naxis2])])[0] - chip_wcs.wcs.crpix
maxUerr = 1.0e-5 / np.amax(np.linalg.norm(crpix2corners, axis=1))
# estimate step for numerical differentiation. We need a step
# large enough to avoid rounding errors and small enough to get a
# better precision for numerical differentiation.
# TODO: The logic below should be revised at a later time so that it
# better takes into account the two competing requirements.
hx = max(1.0, min(20.0, (chip_wcs.wcs.crpix[0] - 1.0) / 100.0,
(naxis1 - chip_wcs.wcs.crpix[0]) / 100.0))
hy = max(1.0, min(20.0, (chip_wcs.wcs.crpix[1] - 1.0) / 100.0,
(naxis2 - chip_wcs.wcs.crpix[1]) / 100.0))
# compute new CRVAL for the image WCS:
crpixinref = wcslin.wcs_world2pix(
chip_wcs.wcs_pix2world([chip_wcs.wcs.crpix],1),1)
crpixinref = np.dot(fit, (crpixinref - shift).T).T
chip_wcs.wcs.crval = wcslin.wcs_pix2world(crpixinref, 1)[0]
chip_wcs.wcs.set()
# initial approximation for CD matrix of the image WCS:
(U, u) = linearize(cwcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,
fit, shift, hx=hx, hy=hy)
err0 = np.amax(np.abs(U-cd_eye)).astype(np.float64)
chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd.astype(ndfloat128), U).astype(np.float64)
chip_wcs.wcs.set()
# NOTE: initial solution is the exact mathematical solution (modulo numeric
# differentiation). However, e.g., due to rounding errors, approximate
# numerical differentiation, the solution may be improved by performing
# several iterations. The next step will try to perform
# fixed-point iterations to "improve" the solution
# but this is not really required.
# Perform fixed-point iterations to improve the approximation
# for CD matrix of the image WCS (actually for the U matrix).
for i in range(maxiter):
(U, u) = linearize(chip_wcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,
cd_eye, zero_shift, hx=hx, hy=hy)
err = np.amax(np.abs(U-cd_eye)).astype(np.float64)
if err > err0:
break
chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd, U).astype(np.float64)
chip_wcs.wcs.set()
if err < maxUerr:
break
err0 = err
if xrms is not None:
chip_wcs.wcs.crder = np.array([xrms,yrms]) | Compute the matrix for the scale and rotation correction
Parameters
----------
chip_wcs: wcs object
HST of the input image
wcslin: wcs object
Reference WCS from which the offsets/rotations are determined
fitgeom: str
NOT USED
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
        [Default = None] | Below is the instruction that describes the task:
### Input:
Compute the matrix for the scale and rotation correction
Parameters
----------
chip_wcs: wcs object
HST of the input image
wcslin: wcs object
Reference WCS from which the offsets/rotations are determined
fitgeom: str
NOT USED
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
### Response:
def update_refchip_with_shift(chip_wcs, wcslin, fitgeom='rscale',
rot=0.0, scale=1.0, xsh=0.0, ysh=0.0,
fit=None, xrms=None, yrms=None):
""" Compute the matrix for the scale and rotation correction
Parameters
----------
chip_wcs: wcs object
HST of the input image
wcslin: wcs object
Reference WCS from which the offsets/rotations are determined
fitgeom: str
NOT USED
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
"""
# compute the matrix for the scale and rotation correction
if fit is None:
fit = linearfit.buildFitMatrix(rot, scale)
shift = np.asarray([xsh, ysh]) - np.dot(wcslin.wcs.crpix, fit) + wcslin.wcs.crpix
fit = np.linalg.inv(fit).T
cwcs = chip_wcs.deepcopy()
cd_eye = np.eye(chip_wcs.wcs.cd.shape[0], dtype=ndfloat128)
zero_shift = np.zeros(2, dtype=ndfloat128)
naxis1, naxis2 = chip_wcs.pixel_shape
# estimate precision necessary for iterative processes:
maxiter = 100
crpix2corners = np.dstack([i.flatten() for i in np.meshgrid(
[1, naxis1], [1, naxis2])])[0] - chip_wcs.wcs.crpix
maxUerr = 1.0e-5 / np.amax(np.linalg.norm(crpix2corners, axis=1))
# estimate step for numerical differentiation. We need a step
# large enough to avoid rounding errors and small enough to get a
# better precision for numerical differentiation.
# TODO: The logic below should be revised at a later time so that it
# better takes into account the two competing requirements.
hx = max(1.0, min(20.0, (chip_wcs.wcs.crpix[0] - 1.0) / 100.0,
(naxis1 - chip_wcs.wcs.crpix[0]) / 100.0))
hy = max(1.0, min(20.0, (chip_wcs.wcs.crpix[1] - 1.0) / 100.0,
(naxis2 - chip_wcs.wcs.crpix[1]) / 100.0))
# compute new CRVAL for the image WCS:
crpixinref = wcslin.wcs_world2pix(
chip_wcs.wcs_pix2world([chip_wcs.wcs.crpix],1),1)
crpixinref = np.dot(fit, (crpixinref - shift).T).T
chip_wcs.wcs.crval = wcslin.wcs_pix2world(crpixinref, 1)[0]
chip_wcs.wcs.set()
# initial approximation for CD matrix of the image WCS:
(U, u) = linearize(cwcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,
fit, shift, hx=hx, hy=hy)
err0 = np.amax(np.abs(U-cd_eye)).astype(np.float64)
chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd.astype(ndfloat128), U).astype(np.float64)
chip_wcs.wcs.set()
# NOTE: initial solution is the exact mathematical solution (modulo numeric
# differentiation). However, e.g., due to rounding errors, approximate
# numerical differentiation, the solution may be improved by performing
# several iterations. The next step will try to perform
# fixed-point iterations to "improve" the solution
# but this is not really required.
# Perform fixed-point iterations to improve the approximation
# for CD matrix of the image WCS (actually for the U matrix).
for i in range(maxiter):
(U, u) = linearize(chip_wcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,
cd_eye, zero_shift, hx=hx, hy=hy)
err = np.amax(np.abs(U-cd_eye)).astype(np.float64)
if err > err0:
break
chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd, U).astype(np.float64)
chip_wcs.wcs.set()
if err < maxUerr:
break
err0 = err
if xrms is not None:
chip_wcs.wcs.crder = np.array([xrms,yrms]) |
def remove(self, rel_path, propagate=False):
'''Delete the file from the cache, and from the upstream'''
if not self.upstream:
raise Exception("Must have an upstream")
# Must always propagate, since this is really just a filter.
        self.upstream.remove(self._rename(rel_path), propagate) | Delete the file from the cache, and from the upstream | Below is the instruction that describes the task:
### Input:
Delete the file from the cache, and from the upstream
### Response:
def remove(self, rel_path, propagate=False):
'''Delete the file from the cache, and from the upstream'''
if not self.upstream:
raise Exception("Must have an upstream")
# Must always propagate, since this is really just a filter.
self.upstream.remove(self._rename(rel_path), propagate) |
def create_distant_reference(self, ref_data):
"""Validate and create the reference in Zotero and return the created item."""
self.validate_reference_data(ref_data)
creation_status = self._zotero_lib.create_items([ref_data])
try:
created_item = creation_status["successful"]["0"]
return created_item
except KeyError as e:
print(creation_status)
            raise CreateZoteroItemError from e | Validate and create the reference in Zotero and return the created item. | Below is the instruction that describes the task:
### Input:
Validate and create the reference in Zotero and return the created item.
### Response:
def create_distant_reference(self, ref_data):
"""Validate and create the reference in Zotero and return the created item."""
self.validate_reference_data(ref_data)
creation_status = self._zotero_lib.create_items([ref_data])
try:
created_item = creation_status["successful"]["0"]
return created_item
except KeyError as e:
print(creation_status)
raise CreateZoteroItemError from e |
def _conf(cls, opts):
"""Setup logging via ini-file from logging_conf_file option."""
if not opts.logging_conf_file:
return False
if not os.path.exists(opts.logging_conf_file):
# FileNotFoundError added only in Python 3.3
# https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy
raise OSError("Error: Unable to locate specified logging configuration file!")
logging.config.fileConfig(opts.logging_conf_file, disable_existing_loggers=False)
        return True | Setup logging via ini-file from logging_conf_file option. | Below is the instruction that describes the task:
### Input:
Setup logging via ini-file from logging_conf_file option.
### Response:
def _conf(cls, opts):
"""Setup logging via ini-file from logging_conf_file option."""
if not opts.logging_conf_file:
return False
if not os.path.exists(opts.logging_conf_file):
# FileNotFoundError added only in Python 3.3
# https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy
raise OSError("Error: Unable to locate specified logging configuration file!")
logging.config.fileConfig(opts.logging_conf_file, disable_existing_loggers=False)
return True |
def _closeResources(self):
""" Closes the root Dataset.
"""
logger.info("Closing: {}".format(self._fileName))
self._h5Group.close()
        self._h5Group = None | Closes the root Dataset. | Below is the instruction that describes the task:
### Input:
Closes the root Dataset.
### Response:
def _closeResources(self):
""" Closes the root Dataset.
"""
logger.info("Closing: {}".format(self._fileName))
self._h5Group.close()
self._h5Group = None |
def join(
self,
words,
sep=None,
sep_spaced=True,
final_sep=None,
conj="and",
conj_spaced=True,
):
"""
Join words into a list.
e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
options:
conj: replacement for 'and'
sep: separator. default ',', unless ',' is in the list then ';'
final_sep: final separator. default ',', unless ',' is in the list then ';'
conj_spaced: boolean. Should conj have spaces around it
"""
if not words:
return ""
if len(words) == 1:
return words[0]
if conj_spaced:
if conj == "":
conj = " "
else:
conj = " %s " % conj
if len(words) == 2:
return "{}{}{}".format(words[0], conj, words[1])
if sep is None:
if "," in "".join(words):
sep = ";"
else:
sep = ","
if final_sep is None:
final_sep = sep
final_sep = "{}{}".format(final_sep, conj)
if sep_spaced:
sep += " "
return "{}{}{}".format(sep.join(words[0:-1]), final_sep, words[-1]) | Join words into a list.
e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
options:
conj: replacement for 'and'
sep: separator. default ',', unless ',' is in the list then ';'
final_sep: final separator. default ',', unless ',' is in the list then ';'
        conj_spaced: boolean. Should conj have spaces around it | Below is the instruction that describes the task:
### Input:
Join words into a list.
e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
options:
conj: replacement for 'and'
sep: separator. default ',', unless ',' is in the list then ';'
final_sep: final separator. default ',', unless ',' is in the list then ';'
conj_spaced: boolean. Should conj have spaces around it
### Response:
def join(
self,
words,
sep=None,
sep_spaced=True,
final_sep=None,
conj="and",
conj_spaced=True,
):
"""
Join words into a list.
e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
options:
conj: replacement for 'and'
sep: separator. default ',', unless ',' is in the list then ';'
final_sep: final separator. default ',', unless ',' is in the list then ';'
conj_spaced: boolean. Should conj have spaces around it
"""
if not words:
return ""
if len(words) == 1:
return words[0]
if conj_spaced:
if conj == "":
conj = " "
else:
conj = " %s " % conj
if len(words) == 2:
return "{}{}{}".format(words[0], conj, words[1])
if sep is None:
if "," in "".join(words):
sep = ";"
else:
sep = ","
if final_sep is None:
final_sep = sep
final_sep = "{}{}".format(final_sep, conj)
if sep_spaced:
sep += " "
return "{}{}{}".format(sep.join(words[0:-1]), final_sep, words[-1]) |