code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def wr_xlsx(self, fout_xlsx, nts):
"""Write specified namedtuples into an Excel spreadsheet."""
wr_xlsx(fout_xlsx, nts, prt_flds=self.prt_flds, fld2col_widths=self.fld2col_widths) | Write specified namedtuples into an Excel spreadsheet. | Below is the instruction that describes the task:
### Input:
Write specified namedtuples into an Excel spreadsheet.
### Response:
def wr_xlsx(self, fout_xlsx, nts):
"""Write specified namedtuples into an Excel spreadsheet."""
wr_xlsx(fout_xlsx, nts, prt_flds=self.prt_flds, fld2col_widths=self.fld2col_widths) |
def generateDiff(self, oldWarnings, newWarnings):
"""
Generate diff between given two lists of warnings.
@param oldWarnings: parsed old warnings
@param newWarnings: parsed new warnings
@return: a dict object of diff
"""
diffWarnings = {}
for modulename in newWarnings:
diffInModule = (
newWarnings[modulename] -
oldWarnings.get(modulename, set()))
if diffInModule:
diffWarnings[modulename] = diffInModule
return diffWarnings | Generate diff between given two lists of warnings.
@param oldWarnings: parsed old warnings
@param newWarnings: parsed new warnings
@return: a dict object of diff | Below is the instruction that describes the task:
### Input:
Generate diff between given two lists of warnings.
@param oldWarnings: parsed old warnings
@param newWarnings: parsed new warnings
@return: a dict object of diff
### Response:
def generateDiff(self, oldWarnings, newWarnings):
"""
Generate diff between given two lists of warnings.
@param oldWarnings: parsed old warnings
@param newWarnings: parsed new warnings
@return: a dict object of diff
"""
diffWarnings = {}
for modulename in newWarnings:
diffInModule = (
newWarnings[modulename] -
oldWarnings.get(modulename, set()))
if diffInModule:
diffWarnings[modulename] = diffInModule
return diffWarnings |
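The diff logic above is just a per-module set difference; the following standalone sketch (module names and warning strings are invented for illustration) reproduces what generateDiff returns for two parsed-warning dicts:

# Standalone reproduction of the diff computation; module names and warning
# strings are invented for illustration.
old_warnings = {"foo.py": {"W001 unused import", "W002 bad indent"},
                "bar.py": {"W003 line too long"}}
new_warnings = {"foo.py": {"W001 unused import", "W004 shadowed name"},
                "baz.py": {"W005 missing docstring"}}
diff = {}
for modulename in new_warnings:
    new_in_module = new_warnings[modulename] - old_warnings.get(modulename, set())
    if new_in_module:
        diff[modulename] = new_in_module
print(diff)
# {'foo.py': {'W004 shadowed name'}, 'baz.py': {'W005 missing docstring'}}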
def put(self, request, bot_id, id, format=None):
"""
Update existing KikBot
---
serializer: KikBotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
return super(KikBotDetail, self).put(request, bot_id, id, format) | Update existing KikBot
---
serializer: KikBotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request | Below is the instruction that describes the task:
### Input:
Update existing KikBot
---
serializer: KikBotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
### Response:
def put(self, request, bot_id, id, format=None):
"""
Update existing KikBot
---
serializer: KikBotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
return super(KikBotDetail, self).put(request, bot_id, id, format) |
def calculate_size(name, items):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += INT_SIZE_IN_BYTES
for items_item in items:
data_size += calculate_size_data(items_item)
return data_size | Calculates the request payload size | Below is the instruction that describes the task:
### Input:
Calculates the request payload size
### Response:
def calculate_size(name, items):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += INT_SIZE_IN_BYTES
for items_item in items:
data_size += calculate_size_data(items_item)
return data_size |
def load(config_path: str):
"""
Load a configuration and keep it alive for the given context
:param config_path: path to a configuration file
"""
# we bind the config to _ to keep it alive
if os.path.splitext(config_path)[1] in ('.yaml', '.yml'):
_ = load_yaml_configuration(config_path, translator=PipelineTranslator())
elif os.path.splitext(config_path)[1] == '.py':
_ = load_python_configuration(config_path)
else:
raise ValueError('Unknown configuration extension: %r' % os.path.splitext(config_path)[1])
yield | Load a configuration and keep it alive for the given context
:param config_path: path to a configuration file | Below is the instruction that describes the task:
### Input:
Load a configuration and keep it alive for the given context
:param config_path: path to a configuration file
### Response:
def load(config_path: str):
"""
Load a configuration and keep it alive for the given context
:param config_path: path to a configuration file
"""
# we bind the config to _ to keep it alive
if os.path.splitext(config_path)[1] in ('.yaml', '.yml'):
_ = load_yaml_configuration(config_path, translator=PipelineTranslator())
elif os.path.splitext(config_path)[1] == '.py':
_ = load_python_configuration(config_path)
else:
raise ValueError('Unknown configuration extension: %r' % os.path.splitext(config_path)[1])
yield |
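Since load ends in a bare yield, it is meant to be driven as a generator-based context (for example via contextlib.contextmanager) so the loaded configuration stays referenced until the block exits. A self-contained sketch of that pattern follows, with a dummy loader standing in for load_yaml_configuration and load_python_configuration:

import contextlib
import os

@contextlib.contextmanager
def keep_config_alive(config_path: str):
    # fake_load stands in for load_yaml_configuration / load_python_configuration,
    # which are not shown above; this only illustrates the keep-alive pattern.
    def fake_load(path):
        return {"path": path}
    ext = os.path.splitext(config_path)[1]
    if ext in ('.yaml', '.yml', '.py'):
        _ = fake_load(config_path)  # bound to _ so it stays referenced
    else:
        raise ValueError('Unknown configuration extension: %r' % ext)
    yield  # the configuration stays alive until the with-block exits

# with keep_config_alive('pipeline.yml'):
#     ...  # configuration remains loaded in here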
def _format_fields(self, fields, title_width=12):
"""Formats a list of fields for display.
Parameters
----------
fields : list
A list of 2-tuples: (field_title, field_content)
title_width : int
How many characters to pad titles to. Default 12.
"""
out = []
header = self.__head
for title, content in fields:
if len(content.splitlines()) > 1:
title = header(title + ":") + "\n"
else:
title = header((title+":").ljust(title_width))
out.append(title + content)
return "\n".join(out) | Formats a list of fields for display.
Parameters
----------
fields : list
A list of 2-tuples: (field_title, field_content)
title_width : int
How many characters to pad titles to. Default 12. | Below is the instruction that describes the task:
### Input:
Formats a list of fields for display.
Parameters
----------
fields : list
A list of 2-tuples: (field_title, field_content)
title_width : int
How many characters to pad titles to. Default 12.
### Response:
def _format_fields(self, fields, title_width=12):
"""Formats a list of fields for display.
Parameters
----------
fields : list
A list of 2-tuples: (field_title, field_content)
title_width : int
How many characters to pad titles to. Default 12.
"""
out = []
header = self.__head
for title, content in fields:
if len(content.splitlines()) > 1:
title = header(title + ":") + "\n"
else:
title = header((title+":").ljust(title_width))
out.append(title + content)
return "\n".join(out) |
def state_call(self, addr, *args, **kwargs):
"""
Create a native or a Java call state.
:param addr: Soot or native addr of the invoke target.
:param args: List of SootArgument values.
"""
state = kwargs.pop('base_state', None)
# check if we need to setup a native or a java callsite
if isinstance(addr, SootAddressDescriptor):
# JAVA CALLSITE
# ret addr precedence: ret_addr kwarg > base_state.addr > terminator
ret_addr = kwargs.pop('ret_addr', state.addr if state else SootAddressTerminator())
cc = kwargs.pop('cc', SimCCSoot(self.arch))
if state is None:
state = self.state_blank(addr=addr, **kwargs)
else:
state = state.copy()
state.regs.ip = addr
cc.setup_callsite(state, ret_addr, args)
return state
else:
# NATIVE CALLSITE
# setup native argument values
native_arg_values = []
for arg in args:
if arg.type in ArchSoot.primitive_types or \
arg.type == "JNIEnv":
# the value of primitive types and the JNIEnv pointer
# are just getting copied into the native memory
native_arg_value = arg.value
if self.arch.bits == 32 and arg.type == "long":
# On 32 bit architecture, long values (w/ 64 bit) are copied
# as two 32 bit integer
# TODO is this correct?
upper = native_arg_value.get_bytes(0, 4)
lower = native_arg_value.get_bytes(4, 4)
idx = args.index(arg)
args = args[:idx] \
+ (SootArgument(upper, 'int'), SootArgument(lower, 'int')) \
+ args[idx+1:]
native_arg_values += [upper, lower]
continue
else:
# argument has a relative type
# => map Java reference to an opaque reference, which the native code
# can use to access the Java object through the JNI interface
native_arg_value = state.jni_references.create_new_reference(obj=arg.value)
native_arg_values += [native_arg_value]
# setup native return type
ret_type = kwargs.pop('ret_type')
native_ret_type = self.get_native_type(ret_type)
# setup function prototype, so the SimCC know how to init the callsite
arg_types = [self.get_native_type(arg.type) for arg in args]
prototype = SimTypeFunction(args=arg_types, returnty=native_ret_type)
native_cc = self.get_native_cc(func_ty=prototype)
# setup native invoke state
return self.native_simos.state_call(addr, *native_arg_values,
base_state=state,
ret_addr=self.native_return_hook_addr,
cc=native_cc, **kwargs) | Create a native or a Java call state.
:param addr: Soot or native addr of the invoke target.
:param args: List of SootArgument values. | Below is the instruction that describes the task:
### Input:
Create a native or a Java call state.
:param addr: Soot or native addr of the invoke target.
:param args: List of SootArgument values.
### Response:
def state_call(self, addr, *args, **kwargs):
"""
Create a native or a Java call state.
:param addr: Soot or native addr of the invoke target.
:param args: List of SootArgument values.
"""
state = kwargs.pop('base_state', None)
# check if we need to setup a native or a java callsite
if isinstance(addr, SootAddressDescriptor):
# JAVA CALLSITE
# ret addr precedence: ret_addr kwarg > base_state.addr > terminator
ret_addr = kwargs.pop('ret_addr', state.addr if state else SootAddressTerminator())
cc = kwargs.pop('cc', SimCCSoot(self.arch))
if state is None:
state = self.state_blank(addr=addr, **kwargs)
else:
state = state.copy()
state.regs.ip = addr
cc.setup_callsite(state, ret_addr, args)
return state
else:
# NATIVE CALLSITE
# setup native argument values
native_arg_values = []
for arg in args:
if arg.type in ArchSoot.primitive_types or \
arg.type == "JNIEnv":
# the value of primitive types and the JNIEnv pointer
# are just getting copied into the native memory
native_arg_value = arg.value
if self.arch.bits == 32 and arg.type == "long":
# On 32 bit architecture, long values (w/ 64 bit) are copied
# as two 32 bit integer
# TODO is this correct?
upper = native_arg_value.get_bytes(0, 4)
lower = native_arg_value.get_bytes(4, 4)
idx = args.index(arg)
args = args[:idx] \
+ (SootArgument(upper, 'int'), SootArgument(lower, 'int')) \
+ args[idx+1:]
native_arg_values += [upper, lower]
continue
else:
# argument has a relative type
# => map Java reference to an opaque reference, which the native code
# can use to access the Java object through the JNI interface
native_arg_value = state.jni_references.create_new_reference(obj=arg.value)
native_arg_values += [native_arg_value]
# setup native return type
ret_type = kwargs.pop('ret_type')
native_ret_type = self.get_native_type(ret_type)
# setup function prototype, so the SimCC know how to init the callsite
arg_types = [self.get_native_type(arg.type) for arg in args]
prototype = SimTypeFunction(args=arg_types, returnty=native_ret_type)
native_cc = self.get_native_cc(func_ty=prototype)
# setup native invoke state
return self.native_simos.state_call(addr, *native_arg_values,
base_state=state,
ret_addr=self.native_return_hook_addr,
cc=native_cc, **kwargs) |
def auto2unicode(text):
"""
This function tries to identify encode in available encodings.
If it finds, then it will convert text into unicode string.
Author : Arulalan.T
04.08.2014
"""
_all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
# get unique word which falls under any one of available encodes from
# user passed text lines
unique_chars = _get_unique_ch(text, _all_common_encodes_)
# count common encode chars
clen = len(_all_common_encodes_)
msg = "Sorry, couldn't find encode :-(\n"
msg += 'Need more words to find unique encode outside of %d ' % clen
msg += 'common compound characters'
if not unique_chars:
print(msg)
return ''
# end of if not unique_chars:
for encode_name, encode_keys in _all_unique_encodes_:
if not len(encode_keys): continue
for ch in encode_keys:
# check whether encode char is present in word
if ch in unique_chars:
# found encode
print(("Found encode : ", encode_name))
encode = _all_encodes_[encode_name]
return encode2unicode(text, encode)
# end of if ch in unique_chars:
# end of for ch in encode_keys:
else:
print(msg)
return '' | This function tries to identify encode in available encodings.
If it finds, then it will convert text into unicode string.
Author : Arulalan.T
04.08.2014 | Below is the instruction that describes the task:
### Input:
This function tries to identify encode in available encodings.
If it finds, then it will convert text into unicode string.
Author : Arulalan.T
04.08.2014
### Response:
def auto2unicode(text):
"""
This function tries to identify encode in available encodings.
If it finds, then it will convert text into unicode string.
Author : Arulalan.T
04.08.2014
"""
_all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
# get unique word which falls under any one of available encodes from
# user passed text lines
unique_chars = _get_unique_ch(text, _all_common_encodes_)
# count common encode chars
clen = len(_all_common_encodes_)
msg = "Sorry, couldn't find encode :-(\n"
msg += 'Need more words to find unique encode outside of %d ' % clen
msg += 'common compound characters'
if not unique_chars:
print(msg)
return ''
# end of if not unique_chars:
for encode_name, encode_keys in _all_unique_encodes_:
if not len(encode_keys): continue
for ch in encode_keys:
# check whether encode char is present in word
if ch in unique_chars:
# found encode
print(("Found encode : ", encode_name))
encode = _all_encodes_[encode_name]
return encode2unicode(text, encode)
# end of if ch in unique_chars:
# end of for ch in encode_keys:
else:
print(msg)
return '' |
def wrap(name, project, sprefix=None, python=sys.executable):
""" Wrap the binary :name: with the runtime extension of the project.
This module generates a python tool that replaces :name:
The function in runner only accepts the replaced binaries
name as argument. We use the cloudpickle package to
perform the serialization, make sure :runner: can be serialized
with it and you're fine.
Args:
name: Binary we want to wrap
project: The project that contains the runtime_extension we want
to run instead of the binary.
Returns:
A plumbum command, ready to launch.
"""
env = __create_jinja_env()
template = env.get_template('run_static.py.inc')
name_absolute = os.path.abspath(name)
real_f = name_absolute + PROJECT_BIN_F_EXT
if sprefix:
run(uchroot()["/bin/mv",
strip_path_prefix(name_absolute, sprefix),
strip_path_prefix(real_f, sprefix)])
else:
run(mv[name_absolute, real_f])
project_file = persist(project, suffix=".project")
env = CFG['env'].value
bin_path = list_to_path(env.get('PATH', []))
bin_path = list_to_path([bin_path, os.environ["PATH"]])
bin_lib_path = list_to_path(env.get('LD_LIBRARY_PATH', []))
bin_lib_path = list_to_path([bin_lib_path, os.environ["LD_LIBRARY_PATH"]])
with open(name_absolute, 'w') as wrapper:
wrapper.write(
template.render(
runf=strip_path_prefix(real_f, sprefix),
project_file=strip_path_prefix(project_file, sprefix),
path=str(bin_path),
ld_library_path=str(bin_lib_path),
python=python,
))
run(chmod["+x", name_absolute])
return local[name_absolute] | Wrap the binary :name: with the runtime extension of the project.
This module generates a python tool that replaces :name:
The function in runner only accepts the replaced binaries
name as argument. We use the cloudpickle package to
perform the serialization, make sure :runner: can be serialized
with it and you're fine.
Args:
name: Binary we want to wrap
project: The project that contains the runtime_extension we want
to run instead of the binary.
Returns:
A plumbum command, ready to launch. | Below is the instruction that describes the task:
### Input:
Wrap the binary :name: with the runtime extension of the project.
This module generates a python tool that replaces :name:
The function in runner only accepts the replaced binaries
name as argument. We use the cloudpickle package to
perform the serialization, make sure :runner: can be serialized
with it and you're fine.
Args:
name: Binary we want to wrap
project: The project that contains the runtime_extension we want
to run instead of the binary.
Returns:
A plumbum command, ready to launch.
### Response:
def wrap(name, project, sprefix=None, python=sys.executable):
""" Wrap the binary :name: with the runtime extension of the project.
This module generates a python tool that replaces :name:
The function in runner only accepts the replaced binaries
name as argument. We use the cloudpickle package to
perform the serialization, make sure :runner: can be serialized
with it and you're fine.
Args:
name: Binary we want to wrap
project: The project that contains the runtime_extension we want
to run instead of the binary.
Returns:
A plumbum command, ready to launch.
"""
env = __create_jinja_env()
template = env.get_template('run_static.py.inc')
name_absolute = os.path.abspath(name)
real_f = name_absolute + PROJECT_BIN_F_EXT
if sprefix:
run(uchroot()["/bin/mv",
strip_path_prefix(name_absolute, sprefix),
strip_path_prefix(real_f, sprefix)])
else:
run(mv[name_absolute, real_f])
project_file = persist(project, suffix=".project")
env = CFG['env'].value
bin_path = list_to_path(env.get('PATH', []))
bin_path = list_to_path([bin_path, os.environ["PATH"]])
bin_lib_path = list_to_path(env.get('LD_LIBRARY_PATH', []))
bin_lib_path = list_to_path([bin_lib_path, os.environ["LD_LIBRARY_PATH"]])
with open(name_absolute, 'w') as wrapper:
wrapper.write(
template.render(
runf=strip_path_prefix(real_f, sprefix),
project_file=strip_path_prefix(project_file, sprefix),
path=str(bin_path),
ld_library_path=str(bin_lib_path),
python=python,
))
run(chmod["+x", name_absolute])
return local[name_absolute] |
def calacs(input_file, exec_path=None, time_stamps=False, temp_files=False,
verbose=False, debug=False, quiet=False, single_core=False,
exe_args=None):
"""
Run the calacs.e executable as from the shell.
By default this will run the calacs given by 'calacs.e'.
Parameters
----------
input_file : str
Name of input file.
exec_path : str, optional
The complete path to a calacs executable.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
temp_files : bool, optional
Set to True to have CALACS save temporary files.
verbose : bool, optional
Set to True for verbose output.
debug : bool, optional
Set to True to turn on debugging output.
quiet : bool, optional
Set to True for quiet output.
single_core : bool, optional
CTE correction in CALACS will by default try to use all available
CPUs on your computer. Set this to True to force the use of just
one CPU.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['calacs.e']
if time_stamps:
call_list.append('-t')
if temp_files:
call_list.append('-s')
if verbose:
call_list.append('-v')
if debug:
call_list.append('-d')
if quiet:
call_list.append('-q')
if single_core:
call_list.append('-1')
if not os.path.exists(input_file):
raise IOError('Input file not found: ' + input_file)
call_list.append(input_file)
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list) | Run the calacs.e executable as from the shell.
By default this will run the calacs given by 'calacs.e'.
Parameters
----------
input_file : str
Name of input file.
exec_path : str, optional
The complete path to a calacs executable.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
temp_files : bool, optional
Set to True to have CALACS save temporary files.
verbose : bool, optional
Set to True for verbose output.
debug : bool, optional
Set to True to turn on debugging output.
quiet : bool, optional
Set to True for quiet output.
single_core : bool, optional
CTE correction in CALACS will by default try to use all available
CPUs on your computer. Set this to True to force the use of just
one CPU.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1'] | Below is the instruction that describes the task:
### Input:
Run the calacs.e executable as from the shell.
By default this will run the calacs given by 'calacs.e'.
Parameters
----------
input_file : str
Name of input file.
exec_path : str, optional
The complete path to a calacs executable.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
temp_files : bool, optional
Set to True to have CALACS save temporary files.
verbose : bool, optional
Set to True for verbose output.
debug : bool, optional
Set to True to turn on debugging output.
quiet : bool, optional
Set to True for quiet output.
single_core : bool, optional
CTE correction in CALACS will by default try to use all available
CPUs on your computer. Set this to True to force the use of just
one CPU.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
### Response:
def calacs(input_file, exec_path=None, time_stamps=False, temp_files=False,
verbose=False, debug=False, quiet=False, single_core=False,
exe_args=None):
"""
Run the calacs.e executable as from the shell.
By default this will run the calacs given by 'calacs.e'.
Parameters
----------
input_file : str
Name of input file.
exec_path : str, optional
The complete path to a calacs executable.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
temp_files : bool, optional
Set to True to have CALACS save temporary files.
verbose : bool, optional
Set to True for verbose output.
debug : bool, optional
Set to True to turn on debugging output.
quiet : bool, optional
Set to True for quiet output.
single_core : bool, optional
CTE correction in CALACS will by default try to use all available
CPUs on your computer. Set this to True to force the use of just
one CPU.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['calacs.e']
if time_stamps:
call_list.append('-t')
if temp_files:
call_list.append('-s')
if verbose:
call_list.append('-v')
if debug:
call_list.append('-d')
if quiet:
call_list.append('-q')
if single_core:
call_list.append('-1')
if not os.path.exists(input_file):
raise IOError('Input file not found: ' + input_file)
call_list.append(input_file)
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list) |
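A quick way to see which command line the wrapper assembles is to rebuild the flag list without invoking the executable; the sketch below mirrors the logic above (the input file name is made up):

# Rebuilds the command line the wrapper assembles, without calling subprocess;
# the input file name is made up for illustration.
def build_calacs_cmdline(input_file, exec_path=None, time_stamps=False,
                         temp_files=False, verbose=False, debug=False,
                         quiet=False, single_core=False, exe_args=None):
    call_list = [exec_path if exec_path else 'calacs.e']
    for flag, enabled in (('-t', time_stamps), ('-s', temp_files), ('-v', verbose),
                          ('-d', debug), ('-q', quiet), ('-1', single_core)):
        if enabled:
            call_list.append(flag)
    call_list.append(input_file)
    if exe_args:
        call_list.extend(exe_args)
    return call_list

print(build_calacs_cmdline('j8bt06010_asn.fits', verbose=True, single_core=True,
                           exe_args=['--nThreads', '1']))
# ['calacs.e', '-v', '-1', 'j8bt06010_asn.fits', '--nThreads', '1']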
def json_from_cov_df(df, threshold=.5, gain=2., n=None, indent=1):
"""Produce a json string describing the graph (list of edges) from a square auto-correlation/covariance matrix
{ "nodes": [{"group": 1, "name": "the"},
{"group": 1, "name": "and"},
{"group": 1, "name": "our"},
{"group": 2, "name": "that"},...
"links": [{"source": 0, "target": 0, "value": 2.637520131294177},
{"source": 0, "target": 1, "value": 1.343999676850537}, ...
"""
nodes, edges = graph_from_cov_df(df=df, threshold=threshold, gain=gain, n=n)
return json.dumps({'nodes': nodes, 'links': edges}, indent=indent) | Produce a json string describing the graph (list of edges) from a square auto-correlation/covariance matrix
{ "nodes": [{"group": 1, "name": "the"},
{"group": 1, "name": "and"},
{"group": 1, "name": "our"},
{"group": 2, "name": "that"},...
"links": [{"source": 0, "target": 0, "value": 2.637520131294177},
{"source": 0, "target": 1, "value": 1.343999676850537}, ... | Below is the the instruction that describes the task:
### Input:
Produce a json string describing the graph (list of edges) from a square auto-correlation/covariance matrix
{ "nodes": [{"group": 1, "name": "the"},
{"group": 1, "name": "and"},
{"group": 1, "name": "our"},
{"group": 2, "name": "that"},...
"links": [{"source": 0, "target": 0, "value": 2.637520131294177},
{"source": 0, "target": 1, "value": 1.343999676850537}, ...
### Response:
def json_from_cov_df(df, threshold=.5, gain=2., n=None, indent=1):
"""Produce a json string describing the graph (list of edges) from a square auto-correlation/covariance matrix
{ "nodes": [{"group": 1, "name": "the"},
{"group": 1, "name": "and"},
{"group": 1, "name": "our"},
{"group": 2, "name": "that"},...
"links": [{"source": 0, "target": 0, "value": 2.637520131294177},
{"source": 0, "target": 1, "value": 1.343999676850537}, ...
"""
nodes, edges = graph_from_cov_df(df=df, threshold=threshold, gain=gain, n=n)
return json.dumps({'nodes': nodes, 'links': edges}, indent=indent) |
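graph_from_cov_df is not shown here, so the sketch below fakes its (nodes, edges) output to illustrate the JSON shape that json_from_cov_df serializes; the values are invented:

import json

# Fake (nodes, edges) data standing in for graph_from_cov_df, which is not
# shown above; values are invented to illustrate the serialized shape.
nodes = [{"group": 1, "name": "the"}, {"group": 1, "name": "and"}]
links = [{"source": 0, "target": 0, "value": 2.64},
         {"source": 0, "target": 1, "value": 1.34}]
print(json.dumps({'nodes': nodes, 'links': links}, indent=1))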
def property_present(properties, admin_username='root', admin_password='calvin', host=None, **kwargs):
'''
properties = {}
'''
ret = {'name': host,
'context': {'Host': host},
'result': True,
'changes': {},
'comment': ''}
if host is None:
output = __salt__['cmd.run_all']('ipmitool lan print')
stdout = output['stdout']
reg = re.compile(r'\s*IP Address\s*:\s*(\d+.\d+.\d+.\d+)\s*')
for line in stdout.splitlines():
result = reg.match(line)
if result is not None:
# we want group(1) as this is match in parentheses
host = result.group(1)
break
if not host:
ret['result'] = False
ret['comment'] = 'Unknown host!'
return ret
properties_get = {}
for key, value in properties.items():
response = __salt__['dracr.get_property'](host, admin_username, admin_password, key)
if response is False or response['retcode'] != 0:
ret['result'] = False
ret['comment'] = 'Failed to get property from idrac'
return ret
properties_get[key] = response['stdout'].split('\n')[-1].split('=')[-1]
if __opts__['test']:
for key, value in properties.items():
if properties_get[key] == value:
ret['changes'][key] = 'Won\'t be changed'
else:
ret['changes'][key] = 'Will be changed to {0}'.format(properties_get[key])
return ret
for key, value in properties.items():
if properties_get[key] != value:
response = __salt__['dracr.set_property'](host, admin_username, admin_password, key, value)
if response is False or response['retcode'] != 0:
ret['result'] = False
ret['comment'] = 'Failed to set property from idrac'
return ret
ret['changes'][key] = 'will be changed - old value {0} , new value {1}'.format(properties_get[key], value)
return ret | properties = {} | Below is the instruction that describes the task:
### Input:
properties = {}
### Response:
def property_present(properties, admin_username='root', admin_password='calvin', host=None, **kwargs):
'''
properties = {}
'''
ret = {'name': host,
'context': {'Host': host},
'result': True,
'changes': {},
'comment': ''}
if host is None:
output = __salt__['cmd.run_all']('ipmitool lan print')
stdout = output['stdout']
reg = re.compile(r'\s*IP Address\s*:\s*(\d+.\d+.\d+.\d+)\s*')
for line in stdout.splitlines():
result = reg.match(line)
if result is not None:
# we want group(1) as this is match in parentheses
host = result.group(1)
break
if not host:
ret['result'] = False
ret['comment'] = 'Unknown host!'
return ret
properties_get = {}
for key, value in properties.items():
response = __salt__['dracr.get_property'](host, admin_username, admin_password, key)
if response is False or response['retcode'] != 0:
ret['result'] = False
ret['comment'] = 'Failed to get property from idrac'
return ret
properties_get[key] = response['stdout'].split('\n')[-1].split('=')[-1]
if __opts__['test']:
for key, value in properties.items():
if properties_get[key] == value:
ret['changes'][key] = 'Won\'t be changed'
else:
ret['changes'][key] = 'Will be changed to {0}'.format(properties_get[key])
return ret
for key, value in properties.items():
if properties_get[key] != value:
response = __salt__['dracr.set_property'](host, admin_username, admin_password, key, value)
if response is False or response['retcode'] != 0:
ret['result'] = False
ret['comment'] = 'Failed to set property from idrac'
return ret
ret['changes'][key] = 'will be changed - old value {0} , new value {1}'.format(properties_get[key], value)
return ret |
def ipopo_factories(self):
"""
List of iPOPO factories
"""
try:
with use_ipopo(self.__context) as ipopo:
return {
name: ipopo.get_factory_details(name)
for name in ipopo.get_factories()
}
except BundleException:
# iPOPO is not available:
return None | List of iPOPO factories | Below is the instruction that describes the task:
### Input:
List of iPOPO factories
### Response:
def ipopo_factories(self):
"""
List of iPOPO factories
"""
try:
with use_ipopo(self.__context) as ipopo:
return {
name: ipopo.get_factory_details(name)
for name in ipopo.get_factories()
}
except BundleException:
# iPOPO is not available:
return None |
def amax_files():
"""
Return all annual maximum flow (`*.am`) files in cache folder and sub folders.
:return: List of file paths
:rtype: list
"""
return [os.path.join(dp, f) for dp, dn, filenames in os.walk(CACHE_FOLDER)
for f in filenames if os.path.splitext(f)[1].lower() == '.am'] | Return all annual maximum flow (`*.am`) files in cache folder and sub folders.
:return: List of file paths
:rtype: list | Below is the instruction that describes the task:
### Input:
Return all annual maximum flow (`*.am`) files in cache folder and sub folders.
:return: List of file paths
:rtype: list
### Response:
def amax_files():
"""
Return all annual maximum flow (`*.am`) files in cache folder and sub folders.
:return: List of file paths
:rtype: list
"""
return [os.path.join(dp, f) for dp, dn, filenames in os.walk(CACHE_FOLDER)
for f in filenames if os.path.splitext(f)[1].lower() == '.am'] |
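The same walk-and-filter pattern generalizes to any extension; a small sketch with the cache folder passed explicitly instead of the module-level CACHE_FOLDER:

import os

def files_with_extension(root, ext):
    """Return all files under root (recursively) whose extension matches ext."""
    ext = ext.lower()
    return [os.path.join(dp, f)
            for dp, dn, filenames in os.walk(root)
            for f in filenames
            if os.path.splitext(f)[1].lower() == ext]

# files_with_extension('/tmp/cache', '.am') mirrors amax_files() with the
# module-level CACHE_FOLDER replaced by an explicit argument.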
def writeImageToFile(self, filename, _format="PNG"):
'''
Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View unique ID and
format extension.
@type _format: str
@param _format: Image format (default format is PNG)
'''
filename = self.device.substituteDeviceTemplate(filename)
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path (fielname='%s')" % filename)
if os.path.isdir(filename):
filename = os.path.join(filename, self.variableNameFromId() + '.' + _format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format" % (filename, _format)
#self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, _format)
# crop:
# im.crop(box) ⇒ image
# Returns a copy of a rectangular region from the current image.
# The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.
((l, t), (r, b)) = self.getCoords()
box = (l, t, r, b)
if DEBUG:
print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
if self.uiAutomatorHelper:
if DEBUG_UI_AUTOMATOR_HELPER:
print >> sys.stderr, "Taking screenshot using UiAutomatorHelper"
received = self.uiAutomatorHelper.takeScreenshot()
stream = StringIO.StringIO(received)
try:
from PIL import Image
image = Image.open(stream)
except ImportError as ex:
# FIXME: this method should be global
self.pilNotInstalledWarning()
sys.exit(1)
except IOError, ex:
print >> sys.stderr, ex
print repr(stream)
sys.exit(1)
else:
image = self.device.takeSnapshot(reconnect=self.device.reconnect)
image.crop(box).save(filename, _format) | Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View unique ID and
format extension.
@type _format: str
@param _format: Image format (default format is PNG) | Below is the instruction that describes the task:
### Input:
Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View unique ID and
format extension.
@type _format: str
@param _format: Image format (default format is PNG)
### Response:
def writeImageToFile(self, filename, _format="PNG"):
'''
Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View unique ID and
format extension.
@type _format: str
@param _format: Image format (default format is PNG)
'''
filename = self.device.substituteDeviceTemplate(filename)
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path (fielname='%s')" % filename)
if os.path.isdir(filename):
filename = os.path.join(filename, self.variableNameFromId() + '.' + _format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format" % (filename, _format)
#self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, _format)
# crop:
# im.crop(box) ⇒ image
# Returns a copy of a rectangular region from the current image.
# The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.
((l, t), (r, b)) = self.getCoords()
box = (l, t, r, b)
if DEBUG:
print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
if self.uiAutomatorHelper:
if DEBUG_UI_AUTOMATOR_HELPER:
print >> sys.stderr, "Taking screenshot using UiAutomatorHelper"
received = self.uiAutomatorHelper.takeScreenshot()
stream = StringIO.StringIO(received)
try:
from PIL import Image
image = Image.open(stream)
except ImportError as ex:
# FIXME: this method should be global
self.pilNotInstalledWarning()
sys.exit(1)
except IOError, ex:
print >> sys.stderr, ex
print repr(stream)
sys.exit(1)
else:
image = self.device.takeSnapshot(reconnect=self.device.reconnect)
image.crop(box).save(filename, _format) |
def add_comment(self, post=None, name=None, email=None, pub_date=None,
website=None, body=None):
"""
Adds a comment to the post provided.
"""
if post is None:
if not self.posts:
raise CommandError("Cannot add comments without posts")
post = self.posts[-1]
post["comments"].append({
"user_name": name,
"user_email": email,
"submit_date": pub_date,
"user_url": website,
"comment": body,
}) | Adds a comment to the post provided. | Below is the instruction that describes the task:
### Input:
Adds a comment to the post provided.
### Response:
def add_comment(self, post=None, name=None, email=None, pub_date=None,
website=None, body=None):
"""
Adds a comment to the post provided.
"""
if post is None:
if not self.posts:
raise CommandError("Cannot add comments without posts")
post = self.posts[-1]
post["comments"].append({
"user_name": name,
"user_email": email,
"submit_date": pub_date,
"user_url": website,
"comment": body,
}) |
def _get_enterprise_admin_users_batch(self, start, end):
"""
Returns a batched queryset of User objects.
"""
LOGGER.info('Fetching new batch of enterprise admin users from indexes: %s to %s', start, end)
return User.objects.filter(groups__name=ENTERPRISE_DATA_API_ACCESS_GROUP, is_staff=False)[start:end] | Returns a batched queryset of User objects. | Below is the instruction that describes the task:
### Input:
Returns a batched queryset of User objects.
### Response:
def _get_enterprise_admin_users_batch(self, start, end):
"""
Returns a batched queryset of User objects.
"""
LOGGER.info('Fetching new batch of enterprise admin users from indexes: %s to %s', start, end)
return User.objects.filter(groups__name=ENTERPRISE_DATA_API_ACCESS_GROUP, is_staff=False)[start:end] |
def _long_to_bytes(n, length, byteorder):
"""Convert a long to a bytestring
For use in python version prior to 3.2
Source:
http://bugs.python.org/issue16580#msg177208
"""
if byteorder == 'little':
indexes = range(length)
else:
indexes = reversed(range(length))
return bytearray((n >> i * 8) & 0xff for i in indexes) | Convert a long to a bytestring
For use in python version prior to 3.2
Source:
http://bugs.python.org/issue16580#msg177208 | Below is the instruction that describes the task:
### Input:
Convert a long to a bytestring
For use in python version prior to 3.2
Source:
http://bugs.python.org/issue16580#msg177208
### Response:
def _long_to_bytes(n, length, byteorder):
"""Convert a long to a bytestring
For use in python version prior to 3.2
Source:
http://bugs.python.org/issue16580#msg177208
"""
if byteorder == 'little':
indexes = range(length)
else:
indexes = reversed(range(length))
return bytearray((n >> i * 8) & 0xff for i in indexes) |
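With _long_to_bytes from above in scope, its output can be checked against int.to_bytes, which is what replaces it on Python 3.2 and later:

# Cross-check against the built-in available from Python 3.2 onwards.
assert bytes(_long_to_bytes(0x12345678, 4, 'big')) == (0x12345678).to_bytes(4, 'big')
assert bytes(_long_to_bytes(0x12345678, 4, 'little')) == (0x12345678).to_bytes(4, 'little')
assert bytes(_long_to_bytes(255, 2, 'big')) == b'\x00\xff'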
def format_args(options):
""" Convert hash/key options into arguments list """
args = list()
for key, value in options.items():
# convert foo_bar key into --foo-bar option
key = key.replace('_', '-')
if value is True:
# key: True
# --key
args.append('--{key}'.format(key=key))
elif is_sequence(value):
# key: ['foo', 'bar']
# --key=foo,bar
values = [str(val) for val in value]
args.append('--{key}={values}'.format(
key=key, values=','.join(values)))
else:
# key: 'foo'
# --key=foo
args.append('--{key}={value}'.format(key=key, value=value))
return args | Convert hash/key options into arguments list | Below is the instruction that describes the task:
### Input:
Convert hash/key options into arguments list
### Response:
def format_args(options):
""" Convert hash/key options into arguments list """
args = list()
for key, value in options.items():
# convert foo_bar key into --foo-bar option
key = key.replace('_', '-')
if value is True:
# key: True
# --key
args.append('--{key}'.format(key=key))
elif is_sequence(value):
# key: ['foo', 'bar']
# --key=foo,bar
values = [str(val) for val in value]
args.append('--{key}={values}'.format(
key=key, values=','.join(values)))
else:
# key: 'foo'
# --key=foo
args.append('--{key}={value}'.format(key=key, value=value))
return args |
def db_get_map(self, table, record, column):
"""
Gets dict type value of 'column' in 'record' in 'table'.
This method corresponds to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL
"""
val = self.db_get_val(table, record, column)
assert isinstance(val, dict)
return val | Gets dict type value of 'column' in 'record' in 'table'.
This method corresponds to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL | Below is the instruction that describes the task:
### Input:
Gets dict type value of 'column' in 'record' in 'table'.
This method corresponds to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL
### Response:
def db_get_map(self, table, record, column):
"""
Gets dict type value of 'column' in 'record' in 'table'.
This method corresponds to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL
"""
val = self.db_get_val(table, record, column)
assert isinstance(val, dict)
return val |
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name] | Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe. | Below is the instruction that describes the task:
### Input:
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
### Response:
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name] |
def predictions(self, image, strict=True, return_details=False):
"""Interface to model.predictions for attacks.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
in_bounds = self.in_bounds(image)
assert not strict or in_bounds
self._total_prediction_calls += 1
predictions = self.__model.predictions(image)
is_adversarial, is_best, distance = self.__is_adversarial(
image, predictions, in_bounds)
assert predictions.ndim == 1
if return_details:
return predictions, is_adversarial, is_best, distance
else:
return predictions, is_adversarial | Interface to model.predictions for attacks.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
strict : bool
Controls if the bounds for the pixel values should be checked. | Below is the instruction that describes the task:
### Input:
Interface to model.predictions for attacks.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
strict : bool
Controls if the bounds for the pixel values should be checked.
### Response:
def predictions(self, image, strict=True, return_details=False):
"""Interface to model.predictions for attacks.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
in_bounds = self.in_bounds(image)
assert not strict or in_bounds
self._total_prediction_calls += 1
predictions = self.__model.predictions(image)
is_adversarial, is_best, distance = self.__is_adversarial(
image, predictions, in_bounds)
assert predictions.ndim == 1
if return_details:
return predictions, is_adversarial, is_best, distance
else:
return predictions, is_adversarial |
def randomtable(numflds=5, numrows=100, wait=0, seed=None):
"""
Construct a table with random numerical data. Use `numflds` and `numrows` to
specify the number of fields and rows respectively. Set `wait` to a float
greater than zero to simulate a delay on each row generation (number of
seconds per row). E.g.::
>>> import petl as etl
>>> table = etl.randomtable(3, 100, seed=42)
>>> table
+----------------------+----------------------+---------------------+
| f0 | f1 | f2 |
+======================+======================+=====================+
| 0.6394267984578837 | 0.025010755222666936 | 0.27502931836911926 |
+----------------------+----------------------+---------------------+
| 0.22321073814882275 | 0.7364712141640124 | 0.6766994874229113 |
+----------------------+----------------------+---------------------+
| 0.8921795677048454 | 0.08693883262941615 | 0.4219218196852704 |
+----------------------+----------------------+---------------------+
| 0.029797219438070344 | 0.21863797480360336 | 0.5053552881033624 |
+----------------------+----------------------+---------------------+
| 0.026535969683863625 | 0.1988376506866485 | 0.6498844377795232 |
+----------------------+----------------------+---------------------+
...
Note that the data are generated on the fly and are not stored in memory,
so this function can be used to simulate very large tables.
"""
return RandomTable(numflds, numrows, wait=wait, seed=seed) | Construct a table with random numerical data. Use `numflds` and `numrows` to
specify the number of fields and rows respectively. Set `wait` to a float
greater than zero to simulate a delay on each row generation (number of
seconds per row). E.g.::
>>> import petl as etl
>>> table = etl.randomtable(3, 100, seed=42)
>>> table
+----------------------+----------------------+---------------------+
| f0 | f1 | f2 |
+======================+======================+=====================+
| 0.6394267984578837 | 0.025010755222666936 | 0.27502931836911926 |
+----------------------+----------------------+---------------------+
| 0.22321073814882275 | 0.7364712141640124 | 0.6766994874229113 |
+----------------------+----------------------+---------------------+
| 0.8921795677048454 | 0.08693883262941615 | 0.4219218196852704 |
+----------------------+----------------------+---------------------+
| 0.029797219438070344 | 0.21863797480360336 | 0.5053552881033624 |
+----------------------+----------------------+---------------------+
| 0.026535969683863625 | 0.1988376506866485 | 0.6498844377795232 |
+----------------------+----------------------+---------------------+
...
Note that the data are generated on the fly and are not stored in memory,
so this function can be used to simulate very large tables. | Below is the instruction that describes the task:
### Input:
Construct a table with random numerical data. Use `numflds` and `numrows` to
specify the number of fields and rows respectively. Set `wait` to a float
greater than zero to simulate a delay on each row generation (number of
seconds per row). E.g.::
>>> import petl as etl
>>> table = etl.randomtable(3, 100, seed=42)
>>> table
+----------------------+----------------------+---------------------+
| f0 | f1 | f2 |
+======================+======================+=====================+
| 0.6394267984578837 | 0.025010755222666936 | 0.27502931836911926 |
+----------------------+----------------------+---------------------+
| 0.22321073814882275 | 0.7364712141640124 | 0.6766994874229113 |
+----------------------+----------------------+---------------------+
| 0.8921795677048454 | 0.08693883262941615 | 0.4219218196852704 |
+----------------------+----------------------+---------------------+
| 0.029797219438070344 | 0.21863797480360336 | 0.5053552881033624 |
+----------------------+----------------------+---------------------+
| 0.026535969683863625 | 0.1988376506866485 | 0.6498844377795232 |
+----------------------+----------------------+---------------------+
...
Note that the data are generated on the fly and are not stored in memory,
so this function can be used to simulate very large tables.
### Response:
def randomtable(numflds=5, numrows=100, wait=0, seed=None):
"""
Construct a table with random numerical data. Use `numflds` and `numrows` to
specify the number of fields and rows respectively. Set `wait` to a float
greater than zero to simulate a delay on each row generation (number of
seconds per row). E.g.::
>>> import petl as etl
>>> table = etl.randomtable(3, 100, seed=42)
>>> table
+----------------------+----------------------+---------------------+
| f0 | f1 | f2 |
+======================+======================+=====================+
| 0.6394267984578837 | 0.025010755222666936 | 0.27502931836911926 |
+----------------------+----------------------+---------------------+
| 0.22321073814882275 | 0.7364712141640124 | 0.6766994874229113 |
+----------------------+----------------------+---------------------+
| 0.8921795677048454 | 0.08693883262941615 | 0.4219218196852704 |
+----------------------+----------------------+---------------------+
| 0.029797219438070344 | 0.21863797480360336 | 0.5053552881033624 |
+----------------------+----------------------+---------------------+
| 0.026535969683863625 | 0.1988376506866485 | 0.6498844377795232 |
+----------------------+----------------------+---------------------+
...
Note that the data are generated on the fly and are not stored in memory,
so this function can be used to simulate very large tables.
"""
return RandomTable(numflds, numrows, wait=wait, seed=seed) |
def post_revert_tags(self, post_id, history_id):
"""Function to reverts a post to a previous set of tags
(Requires login) (UNTESTED).
Parameters:
post_id (int): The post id number to update.
history_id (int): The id number of the tag history.
"""
params = {'id': post_id, 'history_id': history_id}
return self._get('post/revert_tags', params, 'PUT') | Function to revert a post to a previous set of tags
(Requires login) (UNTESTED).
Parameters:
post_id (int): The post id number to update.
history_id (int): The id number of the tag history. | Below is the instruction that describes the task:
### Input:
Function to revert a post to a previous set of tags
(Requires login) (UNTESTED).
Parameters:
post_id (int): The post id number to update.
history_id (int): The id number of the tag history.
### Response:
def post_revert_tags(self, post_id, history_id):
"""Function to reverts a post to a previous set of tags
(Requires login) (UNTESTED).
Parameters:
post_id (int): The post id number to update.
history_id (int): The id number of the tag history.
"""
params = {'id': post_id, 'history_id': history_id}
return self._get('post/revert_tags', params, 'PUT') |
def configure_modrpaf(self):
"""
Installs the mod-rpaf Apache module.
https://github.com/gnif/mod_rpaf
"""
r = self.local_renderer
if r.env.modrpaf_enabled:
self.install_packages()
self.enable_mod('rpaf')
else:
if self.last_manifest.modrpaf_enabled:
self.disable_mod('mod_rpaf') | Installs the mod-rpaf Apache module.
https://github.com/gnif/mod_rpaf | Below is the instruction that describes the task:
### Input:
Installs the mod-rpaf Apache module.
https://github.com/gnif/mod_rpaf
### Response:
def configure_modrpaf(self):
"""
Installs the mod-rpaf Apache module.
https://github.com/gnif/mod_rpaf
"""
r = self.local_renderer
if r.env.modrpaf_enabled:
self.install_packages()
self.enable_mod('rpaf')
else:
if self.last_manifest.modrpaf_enabled:
self.disable_mod('mod_rpaf') |
def pretty_tree(x, kids, show):
"""(a, (a -> list(a)), (a -> str)) -> str
Returns a pseudographic tree representation of x similar to the tree command
in Unix.
"""
(MID, END, CONT, LAST, ROOT) = (u'|-- ', u'`-- ', u'| ', u' ', u'')
def rec(x, indent, sym):
line = indent + sym + show(x)
xs = kids(x)
if len(xs) == 0:
return line
else:
if sym == MID:
next_indent = indent + CONT
elif sym == ROOT:
next_indent = indent + ROOT
else:
next_indent = indent + LAST
syms = [MID] * (len(xs) - 1) + [END]
lines = [rec(x, next_indent, sym) for x, sym in zip(xs, syms)]
return u'\n'.join([line] + lines)
return rec(x, u'', ROOT) | (a, (a -> list(a)), (a -> str)) -> str
Returns a pseudographic tree representation of x similar to the tree command
in Unix. | Below is the instruction that describes the task:
### Input:
(a, (a -> list(a)), (a -> str)) -> str
Returns a pseudographic tree representation of x similar to the tree command
in Unix.
### Response:
def pretty_tree(x, kids, show):
"""(a, (a -> list(a)), (a -> str)) -> str
Returns a pseudographic tree representation of x similar to the tree command
in Unix.
"""
(MID, END, CONT, LAST, ROOT) = (u'|-- ', u'`-- ', u'| ', u' ', u'')
def rec(x, indent, sym):
line = indent + sym + show(x)
xs = kids(x)
if len(xs) == 0:
return line
else:
if sym == MID:
next_indent = indent + CONT
elif sym == ROOT:
next_indent = indent + ROOT
else:
next_indent = indent + LAST
syms = [MID] * (len(xs) - 1) + [END]
lines = [rec(x, next_indent, sym) for x, sym in zip(xs, syms)]
return u'\n'.join([line] + lines)
return rec(x, u'', ROOT) |
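With pretty_tree as defined above, a small nested structure renders like the Unix tree command:

# Example: render a small nested (label, children) structure.
tree = ("root", [("a", [("a1", []), ("a2", [])]), ("b", [])])
print(pretty_tree(tree,
                  kids=lambda node: node[1],
                  show=lambda node: node[0]))
# root
# |-- a
# |   |-- a1
# |   `-- a2
# `-- b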
def getConfig(self):
"""Return the configuration of the city.
:return: configuration of the city.
:rtype: dict.
"""
config = {}
config["name"] = self.city
config["intervals"] = self.__intervals
config["last_date"] = self.__lastDay
config["excludedUsers"] = []
config["excludedLocations"] = []
for e in self.__excludedUsers:
config["excludedUsers"].append(e)
for e in self.__excludedLocations:
config["excludedLocations"].append(e)
config["locations"] = self.__locations
return config | Return the configuration of the city.
:return: configuration of the city.
:rtype: dict. | Below is the instruction that describes the task:
### Input:
Return the configuration of the city.
:return: configuration of the city.
:rtype: dict.
### Response:
def getConfig(self):
"""Return the configuration of the city.
:return: configuration of the city.
:rtype: dict.
"""
config = {}
config["name"] = self.city
config["intervals"] = self.__intervals
config["last_date"] = self.__lastDay
config["excludedUsers"] = []
config["excludedLocations"] = []
for e in self.__excludedUsers:
config["excludedUsers"].append(e)
for e in self.__excludedLocations:
config["excludedLocations"].append(e)
config["locations"] = self.__locations
return config |
def sid(self):
"""Semantic id."""
pnames = list(self.terms)+list(self.dterms)
pnames.sort()
return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__])) | Semantic id. | Below is the instruction that describes the task:
### Input:
Semantic id.
### Response:
def sid(self):
"""Semantic id."""
pnames = list(self.terms)+list(self.dterms)
pnames.sort()
return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__])) |
def __live_receivers(signal):
"""Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal.
"""
with __lock:
__purge()
receivers = [funcref() for funcref in __receivers[signal]]
return receivers | Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal. | Below is the instruction that describes the task:
### Input:
Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal.
### Response:
def __live_receivers(signal):
"""Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal.
"""
with __lock:
__purge()
receivers = [funcref() for funcref in __receivers[signal]]
return receivers |
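The module-level __lock, __purge and __receivers used above are not shown; the following self-contained sketch illustrates the same weak-reference pattern, including the purge of receivers whose target has been garbage collected:

import weakref
from collections import defaultdict

# Sketch of a weak-reference receiver registry; the real module's __lock,
# __purge and __receivers are private and not shown above.
_receivers = defaultdict(list)

def connect(signal, func):
    _receivers[signal].append(weakref.ref(func))

def live_receivers(signal):
    # drop references whose target has been garbage collected, then dereference
    _receivers[signal] = [ref for ref in _receivers[signal] if ref() is not None]
    return [ref() for ref in _receivers[signal]]

def on_save(payload):
    print("saved", payload)

connect("save", on_save)
for handler in live_receivers("save"):
    handler({"id": 1})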
def is_subset(self, other):
"""Check that every element in self has a count <= in other.
Args:
other (Set)
"""
if isinstance(other, _basebag):
for elem, count in self.counts():
if not count <= other.count(elem):
return False
else:
for elem in self:
if self.count(elem) > 1 or elem not in other:
return False
return True | Check that every element in self has a count <= in other.
Args:
other (Set) | Below is the instruction that describes the task:
### Input:
Check that every element in self has a count <= in other.
Args:
other (Set)
### Response:
def is_subset(self, other):
"""Check that every element in self has a count <= in other.
Args:
other (Set)
"""
if isinstance(other, _basebag):
for elem, count in self.counts():
if not count <= other.count(elem):
return False
else:
for elem in self:
if self.count(elem) > 1 or elem not in other:
return False
return True |
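_basebag itself is not shown; collections.Counter can illustrate the same multiset-subset rule the method implements (every count on the left must not exceed the count on the right):

from collections import Counter

# Counter illustrates the same multiset-subset rule; _basebag is not shown above.
def counter_is_subset(left, right):
    return all(count <= right[elem] for elem, count in left.items())

assert counter_is_subset(Counter("aab"), Counter("aaabc"))    # {a:2, b:1} fits in {a:3, b:1, c:1}
assert not counter_is_subset(Counter("aab"), Counter("abc"))  # needs two a's, only one available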
def get_range(self, process_err_pct=0.05):
"""
Returns slant range to the object. Call once for each
new measurement at dt time from last call.
"""
vel = self.vel + 5 * randn()
alt = self.alt + 10 * randn()
self.pos += vel*self.dt
err = (self.pos * process_err_pct) * randn()
slant_range = (self.pos**2 + alt**2)**.5 + err
return slant_range | Returns slant range to the object. Call once for each
new measurement at dt time from last call. | Below is the instruction that describes the task:
### Input:
Returns slant range to the object. Call once for each
new measurement at dt time from last call.
### Response:
def get_range(self, process_err_pct=0.05):
"""
Returns slant range to the object. Call once for each
new measurement at dt time from last call.
"""
vel = self.vel + 5 * randn()
alt = self.alt + 10 * randn()
self.pos += vel*self.dt
err = (self.pos * process_err_pct) * randn()
slant_range = (self.pos**2 + alt**2)**.5 + err
return slant_range |
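A runnable sketch of the simulator this method belongs to follows; the attribute names (dt, pos, vel, alt) match the code above, while the class name and initial values are invented for illustration:

import numpy as np
from numpy.random import randn

# Self-contained stand-in for the simulator class this method belongs to;
# attribute names follow the code above, initial values are invented.
class RadarSim(object):
    def __init__(self, dt=1.0, pos=0.0, vel=100.0, alt=1000.0):
        self.dt, self.pos, self.vel, self.alt = dt, pos, vel, alt

    def get_range(self, process_err_pct=0.05):
        vel = self.vel + 5 * randn()
        alt = self.alt + 10 * randn()
        self.pos += vel * self.dt
        err = (self.pos * process_err_pct) * randn()
        return (self.pos**2 + alt**2)**.5 + err

sim = RadarSim()
ranges = [sim.get_range() for _ in range(5)]  # one noisy slant range per step
print(np.round(ranges, 1))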
def cleanup(self):
"""Forcefully delete objects from memory
In an ideal world, this shouldn't be necessary. Garbage
collection guarantees that anything without reference
is automatically removed.
However, because this application is designed to be run
multiple times from the same interpreter process, extra
care must be taken to ensure there are no memory leaks.
Explicitly deleting objects shines a light on where objects
may still be referenced in the form of an error. No errors
means this was uneccesary, but that's ok.
"""
for instance in self.context:
del(instance)
for plugin in self.plugins:
del(plugin) | Forcefully delete objects from memory
In an ideal world, this shouldn't be necessary. Garbage
collection guarantees that anything without reference
is automatically removed.
However, because this application is designed to be run
multiple times from the same interpreter process, extra
care must be taken to ensure there are no memory leaks.
Explicitly deleting objects shines a light on where objects
may still be referenced in the form of an error. No errors
means this was unnecessary, but that's ok. | Below is the instruction that describes the task:
### Input:
Forcefully delete objects from memory
In an ideal world, this shouldn't be necessary. Garbage
collection guarantees that anything without reference
is automatically removed.
However, because this application is designed to be run
multiple times from the same interpreter process, extra
case must be taken to ensure there are no memory leaks.
Explicitly deleting objects shines a light on where objects
may still be referenced in the form of an error. No errors
means this was unnecessary, but that's ok.
### Response:
def cleanup(self):
"""Forcefully delete objects from memory
In an ideal world, this shouldn't be necessary. Garbage
collection guarantees that anything without reference
is automatically removed.
However, because this application is designed to be run
multiple times from the same interpreter process, extra
care must be taken to ensure there are no memory leaks.
Explicitly deleting objects shines a light on where objects
may still be referenced in the form of an error. No errors
means this was unnecessary, but that's ok.
"""
for instance in self.context:
del(instance)
for plugin in self.plugins:
del(plugin) |
def _delete(self, *args, **kwargs):
"""
A wrapper for deleting things
:returns: The response of your delete
:rtype: dict
"""
response = requests.delete(*args, **kwargs)
response.raise_for_status() | A wrapper for deleting things
:returns: The response of your delete
:rtype: dict | Below is the instruction that describes the task:
### Input:
A wrapper for deleting things
:returns: The response of your delete
:rtype: dict
### Response:
def _delete(self, *args, **kwargs):
"""
A wrapper for deleting things
:returns: The response of your delete
:rtype: dict
"""
response = requests.delete(*args, **kwargs)
response.raise_for_status() |
def _get_output_nodes(self, output_path, error_path):
"""
Extracts output nodes from the standard output and standard error
files.
"""
from aiida.orm.data.array.trajectory import TrajectoryData
import re
state = None
step = None
scale = None
with open(output_path) as f:
lines = [x.strip('\n') for x in f.readlines()]
result_dict = dict()
trajectory = None
for line in lines:
if state is None and re.match('^\s*NWChem SCF Module\s*$',line):
state = 'nwchem-scf-module'
continue
if state is None and re.match('^\s*NWChem Geometry Optimization\s*$',line):
state = 'nwchem-geometry-optimisation'
trajectory = TrajectoryData()
continue
if state == 'nwchem-scf-module' and re.match('^\s*Final RHF \s*results\s*$',line):
state = 'final-rhf-results'
continue
if re.match('^\s*\-*\s*$',line):
continue
if state == 'final-rhf-results':
result = re.match('^\s*([^=]+?)\s*=\s*([\-\d\.]+)$',line)
if result:
key = re.sub('[^a-zA-Z0-9]+', '_', result.group(1).lower())
result_dict[key] = result.group(2)
else:
state = 'nwchem-scf-module'
if state == 'nwchem-geometry-optimisation' and re.match('^\s*Step\s+\d+\s*$',line):
result = re.match('^\s*Step\s+(\d+)\s*$',line)
step = result.group(1)
continue
if state == 'nwchem-geometry-optimisation' and \
re.match('^\s*Output coordinates in a.u.',line):
state = 'nwchem-geometry-optimisation-coordinates'
result = re.match('scale by \s*([\-\d\.]+)',line)
scale = result.group(1)
continue
return [('parameters', ParameterData(dict=result_dict))] | Extracts output nodes from the standard output and standard error
files. | Below is the instruction that describes the task:
### Input:
Extracts output nodes from the standard output and standard error
files.
### Response:
def _get_output_nodes(self, output_path, error_path):
"""
Extracts output nodes from the standard output and standard error
files.
"""
from aiida.orm.data.array.trajectory import TrajectoryData
import re
state = None
step = None
scale = None
with open(output_path) as f:
lines = [x.strip('\n') for x in f.readlines()]
result_dict = dict()
trajectory = None
for line in lines:
if state is None and re.match('^\s*NWChem SCF Module\s*$',line):
state = 'nwchem-scf-module'
continue
if state is None and re.match('^\s*NWChem Geometry Optimization\s*$',line):
state = 'nwchem-geometry-optimisation'
trajectory = TrajectoryData()
continue
if state == 'nwchem-scf-module' and re.match('^\s*Final RHF \s*results\s*$',line):
state = 'final-rhf-results'
continue
if re.match('^\s*\-*\s*$',line):
continue
if state == 'final-rhf-results':
result = re.match('^\s*([^=]+?)\s*=\s*([\-\d\.]+)$',line)
if result:
key = re.sub('[^a-zA-Z0-9]+', '_', result.group(1).lower())
result_dict[key] = result.group(2)
else:
state = 'nwchem-scf-module'
if state == 'nwchem-geometry-optimisation' and re.match('^\s*Step\s+\d+\s*$',line):
result = re.match('^\s*Step\s+(\d+)\s*$',line)
step = result.group(1)
continue
if state == 'nwchem-geometry-optimisation' and \
re.match('^\s*Output coordinates in a.u.',line):
state = 'nwchem-geometry-optimisation-coordinates'
result = re.match('scale by \s*([\-\d\.]+)',line)
scale = result.group(1)
continue
return [('parameters', ParameterData(dict=result_dict))] |
def reset_api_secret(context, id, etag):
"""reset_api_secret(context, id, etag)
Reset a Feeder api_secret.
>>> dcictl feeder-reset-api-secret [OPTIONS]
:param string id: ID of the feeder [required]
:param string etag: Entity tag of the feeder resource [required]
"""
result = feeder.reset_api_secret(context, id=id, etag=etag)
utils.format_output(result, context.format,
headers=['id', 'api_secret', 'etag']) | reset_api_secret(context, id, etag)
Reset a Feeder api_secret.
>>> dcictl feeder-reset-api-secret [OPTIONS]
:param string id: ID of the feeder [required]
:param string etag: Entity tag of the feeder resource [required] | Below is the instruction that describes the task:
### Input:
reset_api_secret(context, id, etag)
Reset a Feeder api_secret.
>>> dcictl feeder-reset-api-secret [OPTIONS]
:param string id: ID of the feeder [required]
:param string etag: Entity tag of the feeder resource [required]
### Response:
def reset_api_secret(context, id, etag):
"""reset_api_secret(context, id, etag)
Reset a Feeder api_secret.
>>> dcictl feeder-reset-api-secret [OPTIONS]
:param string id: ID of the feeder [required]
:param string etag: Entity tag of the feeder resource [required]
"""
result = feeder.reset_api_secret(context, id=id, etag=etag)
utils.format_output(result, context.format,
headers=['id', 'api_secret', 'etag']) |
def run_individual(sim_var,
reference,
neuroml_file,
nml_doc,
still_included,
generate_dir,
target,
sim_time,
dt,
simulator,
cleanup = True,
show=False):
"""
Run an individual simulation.
The candidate data has been flattened into the sim_var dict. The
sim_var dict contains parameter:value key value pairs, which are
applied to the model before it is simulated.
"""
for var_name in sim_var.keys():
individual_var_names = var_name.split('+')
for individual_var_name in individual_var_names:
words = individual_var_name.split('/')
type, id1 = words[0].split(':')
if ':' in words[1]:
variable, id2 = words[1].split(':')
else:
variable = words[1]
id2 = None
units = words[2]
value = sim_var[var_name]
pyneuroml.pynml.print_comment_v(' Changing value of %s (%s) in %s (%s) to: %s %s'%(variable, id2, type, id1, value, units))
if type == 'channel':
channel = nml_doc.get_by_id(id1)
if channel:
print("Setting channel %s"%(channel))
if variable == 'vShift':
channel.v_shift = '%s %s'%(value, units)
else:
pyneuroml.pynml.print_comment_v('Could not find channel with id %s from expression: %s'%(id1, individual_var_name))
exit()
elif type == 'cell':
cell = None
for c in nml_doc.cells:
if c.id == id1:
cell = c
if variable == 'channelDensity':
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.id == id2:
chanDens = cd
chanDens.cond_density = '%s %s'%(value, units)
elif variable == 'vShift_channelDensity':
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.id == id2:
chanDens = cd
chanDens.v_shift = '%s %s'%(value, units)
elif variable == 'channelDensityNernst':
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_density_nernsts:
if cd.id == id2:
chanDens = cd
chanDens.cond_density = '%s %s'%(value, units)
elif variable == 'erev_id': # change all values of erev in channelDensity elements with only this id
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.id == id2:
chanDens = cd
chanDens.erev = '%s %s'%(value, units)
elif variable == 'erev_ion': # change all values of erev in channelDensity elements with this ion
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.ion == id2:
chanDens = cd
chanDens.erev = '%s %s'%(value, units)
elif variable == 'specificCapacitance':
specCap = None
for sc in cell.biophysical_properties.membrane_properties.specific_capacitances:
if (sc.segment_groups == None and id2 == 'all') or sc.segment_groups == id2 :
specCap = sc
specCap.value = '%s %s'%(value, units)
elif variable == 'resistivity':
resistivity = None
for rs in cell.biophysical_properties.intracellular_properties.resistivities:
if (rs.segment_groups == None and id2 == 'all') or rs.segment_groups == id2 :
resistivity = rs
resistivity.value = '%s %s'%(value, units)
else:
pyneuroml.pynml.print_comment_v('Unknown variable (%s) in variable expression: %s'%(variable, individual_var_name))
exit()
elif type == 'izhikevich2007Cell':
izhcell = None
for c in nml_doc.izhikevich2007_cells:
if c.id == id1:
izhcell = c
izhcell.__setattr__(variable, '%s %s'%(value, units))
else:
pyneuroml.pynml.print_comment_v('Unknown type (%s) in variable expression: %s'%(type, individual_var_name))
new_neuroml_file = '%s/%s'%(generate_dir,os.path.basename(neuroml_file))
if new_neuroml_file == neuroml_file:
pyneuroml.pynml.print_comment_v('Cannot use a directory for generating into (%s) which is the same location of the NeuroML file (%s)!'% \
(neuroml_file, generate_dir))
pyneuroml.pynml.write_neuroml2_file(nml_doc, new_neuroml_file)
for include in still_included:
inc_loc = '%s/%s'%(os.path.dirname(os.path.abspath(neuroml_file)),include)
pyneuroml.pynml.print_comment_v("Copying non included file %s to %s (%s) beside %s"%(inc_loc, generate_dir,os.path.abspath(generate_dir), new_neuroml_file))
shutil.copy(inc_loc, generate_dir)
from pyneuroml.tune.NeuroMLSimulation import NeuroMLSimulation
sim = NeuroMLSimulation(reference,
neuroml_file = new_neuroml_file,
target = target,
sim_time = sim_time,
dt = dt,
simulator = simulator,
generate_dir = generate_dir,
cleanup = cleanup,
nml_doc = nml_doc)
sim.go()
if show:
sim.show()
return sim.t, sim.volts | Run an individual simulation.
The candidate data has been flattened into the sim_var dict. The
sim_var dict contains parameter:value key value pairs, which are
applied to the model before it is simulated. | Below is the instruction that describes the task:
### Input:
Run an individual simulation.
The candidate data has been flattened into the sim_var dict. The
sim_var dict contains parameter:value key value pairs, which are
applied to the model before it is simulated.
### Response:
def run_individual(sim_var,
reference,
neuroml_file,
nml_doc,
still_included,
generate_dir,
target,
sim_time,
dt,
simulator,
cleanup = True,
show=False):
"""
Run an individual simulation.
The candidate data has been flattened into the sim_var dict. The
sim_var dict contains parameter:value key value pairs, which are
applied to the model before it is simulated.
"""
for var_name in sim_var.keys():
individual_var_names = var_name.split('+')
for individual_var_name in individual_var_names:
words = individual_var_name.split('/')
type, id1 = words[0].split(':')
if ':' in words[1]:
variable, id2 = words[1].split(':')
else:
variable = words[1]
id2 = None
units = words[2]
value = sim_var[var_name]
pyneuroml.pynml.print_comment_v(' Changing value of %s (%s) in %s (%s) to: %s %s'%(variable, id2, type, id1, value, units))
if type == 'channel':
channel = nml_doc.get_by_id(id1)
if channel:
print("Setting channel %s"%(channel))
if variable == 'vShift':
channel.v_shift = '%s %s'%(value, units)
else:
pyneuroml.pynml.print_comment_v('Could not find channel with id %s from expression: %s'%(id1, individual_var_name))
exit()
elif type == 'cell':
cell = None
for c in nml_doc.cells:
if c.id == id1:
cell = c
if variable == 'channelDensity':
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.id == id2:
chanDens = cd
chanDens.cond_density = '%s %s'%(value, units)
elif variable == 'vShift_channelDensity':
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.id == id2:
chanDens = cd
chanDens.v_shift = '%s %s'%(value, units)
elif variable == 'channelDensityNernst':
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_density_nernsts:
if cd.id == id2:
chanDens = cd
chanDens.cond_density = '%s %s'%(value, units)
elif variable == 'erev_id': # change all values of erev in channelDensity elements with only this id
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.id == id2:
chanDens = cd
chanDens.erev = '%s %s'%(value, units)
elif variable == 'erev_ion': # change all values of erev in channelDensity elements with this ion
chanDens = None
for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
if cd.ion == id2:
chanDens = cd
chanDens.erev = '%s %s'%(value, units)
elif variable == 'specificCapacitance':
specCap = None
for sc in cell.biophysical_properties.membrane_properties.specific_capacitances:
if (sc.segment_groups == None and id2 == 'all') or sc.segment_groups == id2 :
specCap = sc
specCap.value = '%s %s'%(value, units)
elif variable == 'resistivity':
resistivity = None
for rs in cell.biophysical_properties.intracellular_properties.resistivities:
if (rs.segment_groups == None and id2 == 'all') or rs.segment_groups == id2 :
resistivity = rs
resistivity.value = '%s %s'%(value, units)
else:
pyneuroml.pynml.print_comment_v('Unknown variable (%s) in variable expression: %s'%(variable, individual_var_name))
exit()
elif type == 'izhikevich2007Cell':
izhcell = None
for c in nml_doc.izhikevich2007_cells:
if c.id == id1:
izhcell = c
izhcell.__setattr__(variable, '%s %s'%(value, units))
else:
pyneuroml.pynml.print_comment_v('Unknown type (%s) in variable expression: %s'%(type, individual_var_name))
new_neuroml_file = '%s/%s'%(generate_dir,os.path.basename(neuroml_file))
if new_neuroml_file == neuroml_file:
pyneuroml.pynml.print_comment_v('Cannot use a directory for generating into (%s) which is the same location of the NeuroML file (%s)!'% \
(neuroml_file, generate_dir))
pyneuroml.pynml.write_neuroml2_file(nml_doc, new_neuroml_file)
for include in still_included:
inc_loc = '%s/%s'%(os.path.dirname(os.path.abspath(neuroml_file)),include)
pyneuroml.pynml.print_comment_v("Copying non included file %s to %s (%s) beside %s"%(inc_loc, generate_dir,os.path.abspath(generate_dir), new_neuroml_file))
shutil.copy(inc_loc, generate_dir)
from pyneuroml.tune.NeuroMLSimulation import NeuroMLSimulation
sim = NeuroMLSimulation(reference,
neuroml_file = new_neuroml_file,
target = target,
sim_time = sim_time,
dt = dt,
simulator = simulator,
generate_dir = generate_dir,
cleanup = cleanup,
nml_doc = nml_doc)
sim.go()
if show:
sim.show()
return sim.t, sim.volts |
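From the key-parsing logic above, each entry of sim_var is a '+'-joinable expression of the form "<type>:<id>/<variable>[:<target id>]/<units>". A hypothetical example of such a dictionary, with cell, channel and units names invented purely for illustration:
sim_var = {
    # "<type>:<id>/<variable>:<target id>/<units>": value
    'cell:hhcell/channelDensity:naChans/mS_per_cm2': 100.0,
    'cell:hhcell/channelDensity:kChans/mS_per_cm2': 20.0,
    'cell:hhcell/specificCapacitance:all/uF_per_cm2': 1.0,
    'izhikevich2007Cell:RS/a/per_ms': 0.03,
}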
def apply_filter_rule(self, _filter, query='in:inbox', way='in'):
"""
:param: _filter _filter a zobjects.FilterRule or the filter name
:param: query on what will the filter be applied
:param: way string describing if filter is for 'in' or 'out' messages
:returns: list of impacted message's ids
"""
if isinstance(_filter, zobjects.FilterRule):
_filter = _filter.name
content = {
'filterRules': {
'filterRule': {'name': _filter}
},
'query': {'_content': query}
}
if way == 'in':
ids = self.request('ApplyFilterRules', content)
elif way == 'out':
ids = self.request('ApplyOutgoingFilterRules', content)
if ids:
return [int(m) for m in ids['m']['ids'].split(',')]
else:
return [] | :param: _filter _filter a zobjects.FilterRule or the filter name
:param: query on what will the filter be applied
:param: way string describing if filter is for 'in' or 'out' messages
:returns: list of impacted message's ids | Below is the instruction that describes the task:
### Input:
:param: _filter _filter a zobjects.FilterRule or the filter name
:param: query on what will the filter be applied
:param: way string describing if filter is for 'in' or 'out' messages
:returns: list of impacted message's ids
### Response:
def apply_filter_rule(self, _filter, query='in:inbox', way='in'):
"""
:param: _filter _filter a zobjects.FilterRule or the filter name
:param: query on what will the filter be applied
:param: way string describing if filter is for 'in' or 'out' messages
:returns: list of impacted message's ids
"""
if isinstance(_filter, zobjects.FilterRule):
_filter = _filter.name
content = {
'filterRules': {
'filterRule': {'name': _filter}
},
'query': {'_content': query}
}
if way == 'in':
ids = self.request('ApplyFilterRules', content)
elif way == 'out':
ids = self.request('ApplyOutgoingFilterRules', content)
if ids:
return [int(m) for m in ids['m']['ids'].split(',')]
else:
return [] |
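A minimal usage sketch, assuming an already-authenticated account object that exposes the method above; the filter name and folder query are placeholders:
# Apply an existing incoming-mail filter to everything already in the inbox:
msg_ids = account.apply_filter_rule('spam-filter', query='in:inbox', way='in')
print(msg_ids)   # e.g. [257, 261, 299] -- integer ids of the affected messages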
def unset_default_org(self):
""" unset the default orgs for tasks """
for org in self.list_orgs():
org_config = self.get_org(org)
if org_config.default:
del org_config.config["default"]
self.set_org(org_config) | unset the default orgs for tasks | Below is the instruction that describes the task:
### Input:
unset the default orgs for tasks
### Response:
def unset_default_org(self):
""" unset the default orgs for tasks """
for org in self.list_orgs():
org_config = self.get_org(org)
if org_config.default:
del org_config.config["default"]
self.set_org(org_config) |
def close(self):
"""
Disconnect and error-out all requests.
"""
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
reactor.callFromThread(self.connector.disconnect)
log.debug("Closed socket to %s", self.endpoint)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
# don't leave in-progress operations hanging
self.connected_event.set() | Disconnect and error-out all requests. | Below is the instruction that describes the task:
### Input:
Disconnect and error-out all requests.
### Response:
def close(self):
"""
Disconnect and error-out all requests.
"""
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
reactor.callFromThread(self.connector.disconnect)
log.debug("Closed socket to %s", self.endpoint)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
# don't leave in-progress operations hanging
self.connected_event.set() |
def find_frame_urls(self, site, frametype, gpsstart, gpsend,
match=None, urltype=None, on_gaps="warn"):
"""Find the framefiles for the given type in the [start, end) interval
frame
@param site:
single-character name of site to match
@param frametype:
name of frametype to match
@param gpsstart:
integer GPS start time of query
@param gpsend:
integer GPS end time of query
@param match:
regular expression to match against
@param urltype:
file scheme to search for (e.g. 'file')
@param on_gaps:
what to do when the requested frame isn't found, one of:
- C{'warn'} (default): print a warning,
- C{'error'}: raise an L{RuntimeError}, or
- C{'ignore'}: do nothing
@type site: L{str}
@type frametype: L{str}
@type gpsstart: L{int}
@type gpsend: L{int}
@type match: L{str}
@type urltype: L{str}
@type on_gaps: L{str}
@returns: L{Cache<pycbc_glue.lal.Cache>}
@raises RuntimeError: if gaps are found and C{on_gaps='error'}
"""
if on_gaps not in ("warn", "error", "ignore"):
raise ValueError("on_gaps must be 'warn', 'error', or 'ignore'.")
url = ("%s/gwf/%s/%s/%s,%s"
% (_url_prefix, site, frametype, gpsstart, gpsend))
# if a URL type is specified append it to the path
if urltype:
url += "/%s" % urltype
# request JSON output
url += ".json"
# append a regex if input
if match:
url += "?match=%s" % match
# make query
response = self._requestresponse("GET", url)
urllist = decode(response.read())
out = lal.Cache([lal.CacheEntry.from_T050017(x,
coltype=self.LIGOTimeGPSType) for x in urllist])
if on_gaps == "ignore":
return out
else:
span = segments.segment(gpsstart, gpsend)
seglist = segments.segmentlist(e.segment for e in out).coalesce()
missing = (segments.segmentlist([span]) - seglist).coalesce()
if span in seglist:
return out
else:
msg = "Missing segments: \n%s" % "\n".join(map(str, missing))
if on_gaps=="warn":
sys.stderr.write("%s\n" % msg)
return out
else:
raise RuntimeError(msg) | Find the framefiles for the given type in the [start, end) interval
frame
@param site:
single-character name of site to match
@param frametype:
name of frametype to match
@param gpsstart:
integer GPS start time of query
@param gpsend:
integer GPS end time of query
@param match:
regular expression to match against
@param urltype:
file scheme to search for (e.g. 'file')
@param on_gaps:
what to do when the requested frame isn't found, one of:
- C{'warn'} (default): print a warning,
- C{'error'}: raise an L{RuntimeError}, or
- C{'ignore'}: do nothing
@type site: L{str}
@type frametype: L{str}
@type gpsstart: L{int}
@type gpsend: L{int}
@type match: L{str}
@type urltype: L{str}
@type on_gaps: L{str}
@returns: L{Cache<pycbc_glue.lal.Cache>}
@raises RuntimeError: if gaps are found and C{on_gaps='error'} | Below is the instruction that describes the task:
### Input:
Find the framefiles for the given type in the [start, end) interval
frame
@param site:
single-character name of site to match
@param frametype:
name of frametype to match
@param gpsstart:
integer GPS start time of query
@param gpsend:
integer GPS end time of query
@param match:
regular expression to match against
@param urltype:
file scheme to search for (e.g. 'file')
@param on_gaps:
what to do when the requested frame isn't found, one of:
- C{'warn'} (default): print a warning,
- C{'error'}: raise an L{RuntimeError}, or
- C{'ignore'}: do nothing
@type site: L{str}
@type frametype: L{str}
@type gpsstart: L{int}
@type gpsend: L{int}
@type match: L{str}
@type urltype: L{str}
@type on_gaps: L{str}
@returns: L{Cache<pycbc_glue.lal.Cache>}
@raises RuntimeError: if gaps are found and C{on_gaps='error'}
### Response:
def find_frame_urls(self, site, frametype, gpsstart, gpsend,
match=None, urltype=None, on_gaps="warn"):
"""Find the framefiles for the given type in the [start, end) interval
frame
@param site:
single-character name of site to match
@param frametype:
name of frametype to match
@param gpsstart:
integer GPS start time of query
@param gpsend:
integer GPS end time of query
@param match:
regular expression to match against
@param urltype:
file scheme to search for (e.g. 'file')
@param on_gaps:
what to do when the requested frame isn't found, one of:
- C{'warn'} (default): print a warning,
- C{'error'}: raise an L{RuntimeError}, or
- C{'ignore'}: do nothing
@type site: L{str}
@type frametype: L{str}
@type gpsstart: L{int}
@type gpsend: L{int}
@type match: L{str}
@type urltype: L{str}
@type on_gaps: L{str}
@returns: L{Cache<pycbc_glue.lal.Cache>}
@raises RuntimeError: if gaps are found and C{on_gaps='error'}
"""
if on_gaps not in ("warn", "error", "ignore"):
raise ValueError("on_gaps must be 'warn', 'error', or 'ignore'.")
url = ("%s/gwf/%s/%s/%s,%s"
% (_url_prefix, site, frametype, gpsstart, gpsend))
# if a URL type is specified append it to the path
if urltype:
url += "/%s" % urltype
# request JSON output
url += ".json"
# append a regex if input
if match:
url += "?match=%s" % match
# make query
response = self._requestresponse("GET", url)
urllist = decode(response.read())
out = lal.Cache([lal.CacheEntry.from_T050017(x,
coltype=self.LIGOTimeGPSType) for x in urllist])
if on_gaps == "ignore":
return out
else:
span = segments.segment(gpsstart, gpsend)
seglist = segments.segmentlist(e.segment for e in out).coalesce()
missing = (segments.segmentlist([span]) - seglist).coalesce()
if span in seglist:
return out
else:
msg = "Missing segments: \n%s" % "\n".join(map(str, missing))
if on_gaps=="warn":
sys.stderr.write("%s\n" % msg)
return out
else:
raise RuntimeError(msg) |
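A hedged usage sketch; the connection object, site letter, frametype and GPS interval below are placeholders chosen only to show the call shape, not values taken from the row above:
cache = conn.find_frame_urls('H', 'H1_EXAMPLE_TYPE', 1126259446, 1126259478,
                             urltype='file', on_gaps='warn')
for entry in cache:
    print(entry)   # cache entries covering the requested [start, end) interval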
def get_atom_sequence_to_rosetta_map(self):
'''Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue.
e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue
i.e. we do not map those residues to None.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
'''
if not self.rosetta_to_atom_sequence_maps and self.rosetta_sequences:
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
atom_sequence_to_rosetta_mapping = {}
for chain_id, mapping in self.rosetta_to_atom_sequence_maps.iteritems():
chain_mapping = {}
for k in mapping:
chain_mapping[k[1]] = k[0]
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap.from_dict(chain_mapping)
# Add empty maps for missing chains
for chain_id, sequence in self.atom_sequences.iteritems():
if not atom_sequence_to_rosetta_mapping.get(chain_id):
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap()
return atom_sequence_to_rosetta_mapping | Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue.
e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue
i.e. we do not map those residues to None.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None. | Below is the instruction that describes the task:
### Input:
Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue.
e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue
i.e. we do not map those residues to None.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
### Response:
def get_atom_sequence_to_rosetta_map(self):
'''Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue.
e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue
i.e. we do not map those residues to None.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
'''
if not self.rosetta_to_atom_sequence_maps and self.rosetta_sequences:
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
atom_sequence_to_rosetta_mapping = {}
for chain_id, mapping in self.rosetta_to_atom_sequence_maps.iteritems():
chain_mapping = {}
for k in mapping:
chain_mapping[k[1]] = k[0]
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap.from_dict(chain_mapping)
# Add empty maps for missing chains
for chain_id, sequence in self.atom_sequences.iteritems():
if not atom_sequence_to_rosetta_mapping.get(chain_id):
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap()
return atom_sequence_to_rosetta_mapping |
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers) | Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple. | Below is the instruction that describes the task:
### Input:
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
### Response:
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers) |
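A sketch of calling the encoder above; `uploader` stands in for an instance that carries the `url` and `boundary` attributes the method reads, and the Request class is assumed to be the urllib-style one -- both are assumptions, not details taken from this row:
req = uploader.encode_request(
    fields=[(':action', 'file_upload'), ('name', 'demo-package')],
    files=[('content', 'demo.txt', b'hello world')])
# req carries the multipart/form-data body plus Content-type/Content-length
# headers, and can then be passed to an opener such as urllib.request.urlopen.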
def dumps(obj, *args, **kwargs):
"""Serialize a object to string
Basic Usage:
>>> import simplekit.objson
>>> obj = {'name':'wendy'}
>>> print simplekit.objson.dumps(obj)
:param obj: an object to dump
:param args: Optional arguments that :func:`json.dumps` takes.
:param kwargs: Keys arguments that :py:func:`json.dumps` takes.
:return: string
"""
kwargs['default'] = object2dict
return json.dumps(obj, *args, **kwargs) | Serialize an object to string
Basic Usage:
>>> import simplekit.objson
>>> obj = {'name':'wendy'}
>>> print simplekit.objson.dumps(obj)
:param obj: an object to dump
:param args: Optional arguments that :func:`json.dumps` takes.
:param kwargs: Keys arguments that :py:func:`json.dumps` takes.
:return: string | Below is the instruction that describes the task:
### Input:
Serialize an object to string
Basic Usage:
>>> import simplekit.objson
>>> obj = {'name':'wendy'}
>>> print simplekit.objson.dumps(obj)
:param obj: an object to dump
:param args: Optional arguments that :func:`json.dumps` takes.
:param kwargs: Keys arguments that :py:func:`json.dumps` takes.
:return: string
### Response:
def dumps(obj, *args, **kwargs):
"""Serialize a object to string
Basic Usage:
>>> import simplekit.objson
>>> obj = {'name':'wendy'}
>>> print simplekit.objson.dumps(obj)
:param obj: an object to dump
:param args: Optional arguments that :func:`json.dumps` takes.
:param kwargs: Keys arguments that :py:func:`json.dumps` takes.
:return: string
"""
kwargs['default'] = object2dict
return json.dumps(obj, *args, **kwargs) |
def _parse_relationships(self, relationships):
""" Ensure compliance with the spec's relationships section
Specifically, the relationships object of the single resource
object. For modifications we only support relationships via
the `data` key referred to as Resource Linkage.
:param relationships:
dict JSON API relationships object
"""
link = 'jsonapi.org/format/#document-resource-object-relationships'
if not isinstance(relationships, dict):
self.fail('The JSON API resource object relationships key MUST '
'be a hash & comply with the spec\'s resource linkage '
'section.', link)
for key, val in relationships.items():
if not isinstance(val, dict) or 'data' not in val:
self.fail('Relationship key %s MUST be a hash & contain '
'a `data` field compliant with the spec\'s '
'resource linkage section.' % key, link)
elif isinstance(val['data'], dict):
data = val['data']
rid = isinstance(data.get('id'), unicode)
rtype = isinstance(data.get('type'), unicode)
if not rid or not rtype:
self.fail('%s relationship\'s resource linkage MUST '
'contain `id` & `type` fields. Additionally, '
'they must both be strings.' % key, link)
elif isinstance(val['data'], list):
abort(exceptions.ModificationDenied(**{
'detail': 'Modifying the %s relationship or any to-many '
'relationships for that matter is not '
'currently supported. Instead, modify the '
'to-one side directly.' % key,
'links': link,
}))
elif val['data']:
self.fail('The relationship key %s is malformed & impossible '
'for us to understand your intentions. It MUST be '
'a hash & contain a `data` field compliant with '
'the spec\'s resource linkage section or null if '
'you want to unset the relationship.' % key, link) | Ensure compliance with the spec's relationships section
Specifically, the relationships object of the single resource
object. For modifications we only support relationships via
the `data` key referred to as Resource Linkage.
:param relationships:
dict JSON API relationships object | Below is the instruction that describes the task:
### Input:
Ensure compliance with the spec's relationships section
Specifically, the relationships object of the single resource
object. For modifications we only support relationships via
the `data` key referred to as Resource Linkage.
:param relationships:
dict JSON API relationships object
### Response:
def _parse_relationships(self, relationships):
""" Ensure compliance with the spec's relationships section
Specifically, the relationships object of the single resource
object. For modifications we only support relationships via
the `data` key referred to as Resource Linkage.
:param relationships:
dict JSON API relationships object
"""
link = 'jsonapi.org/format/#document-resource-object-relationships'
if not isinstance(relationships, dict):
self.fail('The JSON API resource object relationships key MUST '
'be a hash & comply with the spec\'s resource linkage '
'section.', link)
for key, val in relationships.items():
if not isinstance(val, dict) or 'data' not in val:
self.fail('Relationship key %s MUST be a hash & contain '
'a `data` field compliant with the spec\'s '
'resource linkage section.' % key, link)
elif isinstance(val['data'], dict):
data = val['data']
rid = isinstance(data.get('id'), unicode)
rtype = isinstance(data.get('type'), unicode)
if not rid or not rtype:
self.fail('%s relationship\'s resource linkage MUST '
'contain `id` & `type` fields. Additionally, '
'they must both be strings.' % key, link)
elif isinstance(val['data'], list):
abort(exceptions.ModificationDenied(**{
'detail': 'Modifying the %s relationship or any to-many '
'relationships for that matter is not '
'currently supported. Instead, modify the '
'to-one side directly.' % key,
'links': link,
}))
elif val['data']:
self.fail('The relationship key %s is malformed & impossible '
'for us to understand your intentions. It MUST be '
'a hash & contain a `data` field compliant with '
'the spec\'s resource linkage section or null if '
'you want to unset the relationship.' % key, link) |
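An illustrative payload that would pass the validation above; the resource types and ids are invented. A to-one linkage must carry string `id`/`type` fields, `data: null` unsets the relationship, and a list-valued `data` is rejected with ModificationDenied:
relationships = {
    'author': {'data': {'type': 'people', 'id': '42'}},   # valid to-one linkage
    'cover':  {'data': None},                             # unsets the relationship
    # 'tags': {'data': [...]} would abort: to-many modification is not supported
}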
def chopurl(url):
'''
A sane "stand alone" url parser.
Example:
info = chopurl(url)
'''
ret = {}
if url.find('://') == -1:
raise s_exc.BadUrl(':// not found in [{}]!'.format(url))
scheme, remain = url.split('://', 1)
ret['scheme'] = scheme.lower()
# carve query params from the end
if remain.find('?') != -1:
query = {}
remain, queryrem = remain.split('?', 1)
for qkey in queryrem.split('&'):
qval = None
if qkey.find('=') != -1:
qkey, qval = qkey.split('=', 1)
query[qkey] = qval
ret['query'] = query
pathrem = ''
slashoff = remain.find('/')
if slashoff != -1:
pathrem = remain[slashoff:]
remain = remain[:slashoff]
# detect user[:passwd]@netloc syntax
if remain.find('@') != -1:
user, remain = remain.rsplit('@', 1)
if user.find(':') != -1:
user, passwd = user.split(':', 1)
ret['passwd'] = passwd
ret['user'] = user
# remain should be down to host[:port]
# detect ipv6 [addr]:port syntax
if remain.startswith('['):
hostrem, portstr = remain.rsplit(':', 1)
ret['port'] = int(portstr)
ret['host'] = hostrem[1:-1]
# detect ipv6 without port syntax
elif remain.count(':') > 1:
ret['host'] = remain
# regular old host or host:port syntax
else:
if remain.find(':') != -1:
remain, portstr = remain.split(':', 1)
ret['port'] = int(portstr)
ret['host'] = remain
ret['path'] = pathrem
return ret | A sane "stand alone" url parser.
Example:
info = chopurl(url) | Below is the instruction that describes the task:
### Input:
A sane "stand alone" url parser.
Example:
info = chopurl(url)
### Response:
def chopurl(url):
'''
A sane "stand alone" url parser.
Example:
info = chopurl(url)
'''
ret = {}
if url.find('://') == -1:
raise s_exc.BadUrl(':// not found in [{}]!'.format(url))
scheme, remain = url.split('://', 1)
ret['scheme'] = scheme.lower()
# carve query params from the end
if remain.find('?') != -1:
query = {}
remain, queryrem = remain.split('?', 1)
for qkey in queryrem.split('&'):
qval = None
if qkey.find('=') != -1:
qkey, qval = qkey.split('=', 1)
query[qkey] = qval
ret['query'] = query
pathrem = ''
slashoff = remain.find('/')
if slashoff != -1:
pathrem = remain[slashoff:]
remain = remain[:slashoff]
# detect user[:passwd]@netloc syntax
if remain.find('@') != -1:
user, remain = remain.rsplit('@', 1)
if user.find(':') != -1:
user, passwd = user.split(':', 1)
ret['passwd'] = passwd
ret['user'] = user
# remain should be down to host[:port]
# detect ipv6 [addr]:port syntax
if remain.startswith('['):
hostrem, portstr = remain.rsplit(':', 1)
ret['port'] = int(portstr)
ret['host'] = hostrem[1:-1]
# detect ipv6 without port syntax
elif remain.count(':') > 1:
ret['host'] = remain
# regular old host or host:port syntax
else:
if remain.find(':') != -1:
remain, portstr = remain.split(':', 1)
ret['port'] = int(portstr)
ret['host'] = remain
ret['path'] = pathrem
return ret |
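A worked example of the parser above, tracing the code on a URL that exercises every branch (user/password, bracketed IPv6 host, port, path and query); the values themselves are made up:
info = chopurl('tcp://visi:secret@[::1]:27492/foo/bar?certhash=deadb33f')
assert info == {
    'scheme': 'tcp',
    'user': 'visi',
    'passwd': 'secret',
    'host': '::1',
    'port': 27492,
    'path': '/foo/bar',
    'query': {'certhash': 'deadb33f'},
}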
def color_palette(name=None, n_colors=6, desat=None):
"""Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements
"""
seaborn_palettes = dict(
deep=["#4C72B0", "#55A868", "#C44E52",
"#8172B2", "#CCB974", "#64B5CD"],
muted=["#4878CF", "#6ACC65", "#D65F5F",
"#B47CC7", "#C4AD66", "#77BEDB"],
pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
"#D0BBFF", "#FFFEA3", "#B0E0E6"],
bright=["#003FFF", "#03ED3A", "#E8000B",
"#8A2BE2", "#FFC400", "#00D7FF"],
dark=["#001C7F", "#017517", "#8C0900",
"#7600A1", "#B8860B", "#006374"],
colorblind=["#0072B2", "#009E73", "#D55E00",
"#CC79A7", "#F0E442", "#56B4E9"],
)
if name is None:
palette = mpl.rcParams["axes.color_cycle"]
elif not isinstance(name, string_types):
palette = name
elif name == "hls":
palette = hls_palette(n_colors)
elif name == "husl":
palette = husl_palette(n_colors)
elif name in seaborn_palettes:
palette = seaborn_palettes[name]
elif name in dir(mpl.cm):
palette = mpl_palette(name, n_colors)
elif name[:-2] in dir(mpl.cm):
palette = mpl_palette(name, n_colors)
else:
raise ValueError("%s is not a valid palette name" % name)
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError("Could not generate a palette for %s" % str(name))
return palette | Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements | Below is the instruction that describes the task:
### Input:
Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements
### Response:
def color_palette(name=None, n_colors=6, desat=None):
"""Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements
"""
seaborn_palettes = dict(
deep=["#4C72B0", "#55A868", "#C44E52",
"#8172B2", "#CCB974", "#64B5CD"],
muted=["#4878CF", "#6ACC65", "#D65F5F",
"#B47CC7", "#C4AD66", "#77BEDB"],
pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
"#D0BBFF", "#FFFEA3", "#B0E0E6"],
bright=["#003FFF", "#03ED3A", "#E8000B",
"#8A2BE2", "#FFC400", "#00D7FF"],
dark=["#001C7F", "#017517", "#8C0900",
"#7600A1", "#B8860B", "#006374"],
colorblind=["#0072B2", "#009E73", "#D55E00",
"#CC79A7", "#F0E442", "#56B4E9"],
)
if name is None:
palette = mpl.rcParams["axes.color_cycle"]
elif not isinstance(name, string_types):
palette = name
elif name == "hls":
palette = hls_palette(n_colors)
elif name == "husl":
palette = husl_palette(n_colors)
elif name in seaborn_palettes:
palette = seaborn_palettes[name]
elif name in dir(mpl.cm):
palette = mpl_palette(name, n_colors)
elif name[:-2] in dir(mpl.cm):
palette = mpl_palette(name, n_colors)
else:
raise ValueError("%s is not a valid palette name" % name)
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError("Could not generate a palette for %s" % str(name))
return palette |
def _get_cpu_virtualization(self):
"""get cpu virtualization status."""
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status | get cpu virtualization status. | Below is the instruction that describes the task:
### Input:
get cpu virtualization status.
### Response:
def _get_cpu_virtualization(self):
"""get cpu virtualization status."""
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status |
def getGroundResolution(self, latitude, level):
'''
returns the ground resolution based on latitude and zoom level.
'''
latitude = self.clipValue(latitude, self.min_lat, self.max_lat);
mapSize = self.getMapDimensionsByZoomLevel(level)
return math.cos(
latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
mapSize | returns the ground resolution based on latitude and zoom level. | Below is the instruction that describes the task:
### Input:
returns the ground resolution based on latitude and zoom level.
### Response:
def getGroundResolution(self, latitude, level):
'''
returns the ground resolution based on latitude and zoom level.
'''
latitude = self.clipValue(latitude, self.min_lat, self.max_lat);
mapSize = self.getMapDimensionsByZoomLevel(level)
return math.cos(
latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
mapSize |
def perform_import(val):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if val is None:
return None
elif isinstance(val, str):
return import_from_string(val)
elif isinstance(val, (list, tuple)):
return [import_from_string(item) for item in val]
return val | If the given setting is a string import notation,
then perform the necessary import or imports. | Below is the instruction that describes the task:
### Input:
If the given setting is a string import notation,
then perform the necessary import or imports.
### Response:
def perform_import(val):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if val is None:
return None
elif isinstance(val, str):
return import_from_string(val)
elif isinstance(val, (list, tuple)):
return [import_from_string(item) for item in val]
return val |
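The helper accepts None, a dotted-path string, a list of such strings, or an already-imported object. `import_from_string` is assumed to resolve "package.module.attr" paths; its definition is not part of this row. A sketch of the expected behaviour under that assumption:
import json

perform_import(None)                            # -> None
perform_import('json.dumps')                    # -> the json.dumps callable
perform_import(['json.dumps', 'json.loads'])    # -> [json.dumps, json.loads]
perform_import(json.dumps)                      # already an object: returned as-is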
def cmd_adb(self, *args):
'''
Run adb from the Android SDK.
Args must come after --, or use
--alias to make an alias
'''
self.check_requirements()
self.install_platform()
args = args[0]
if args and args[0] == '--alias':
print('To set up ADB in this shell session, execute:')
print(' alias adb=$(buildozer {} adb --alias 2>&1 >/dev/null)'
.format(self.targetname))
sys.stderr.write(self.adb_cmd + '\n')
else:
self.buildozer.cmd(' '.join([self.adb_cmd] + args)) | Run adb from the Android SDK.
Args must come after --, or use
--alias to make an alias | Below is the instruction that describes the task:
### Input:
Run adb from the Android SDK.
Args must come after --, or use
--alias to make an alias
### Response:
def cmd_adb(self, *args):
'''
Run adb from the Android SDK.
Args must come after --, or use
--alias to make an alias
'''
self.check_requirements()
self.install_platform()
args = args[0]
if args and args[0] == '--alias':
print('To set up ADB in this shell session, execute:')
print(' alias adb=$(buildozer {} adb --alias 2>&1 >/dev/null)'
.format(self.targetname))
sys.stderr.write(self.adb_cmd + '\n')
else:
self.buildozer.cmd(' '.join([self.adb_cmd] + args)) |
def release(self, args):
"""
Set the Out_Of_Service property to False - to release the I/O point back to
the controller's control.
:param args: String with <addr> <type> <inst>
"""
if not self._started:
raise ApplicationNotStarted("BACnet stack not running - use startApp()")
args = args.split()
addr, obj_type, obj_inst = args[:3]
try:
self.write("{} {} {} outOfService False".format(addr, obj_type, obj_inst))
except NoResponseFromController:
pass
try:
if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
raise OutOfServiceSet()
else:
pass # Everything is ok"
except NoResponseFromController:
pass | Set the Out_Of_Service property to False - to release the I/O point back to
the controller's control.
:param args: String with <addr> <type> <inst> | Below is the instruction that describes the task:
### Input:
Set the Out_Of_Service property to False - to release the I/O point back to
the controller's control.
:param args: String with <addr> <type> <inst>
### Response:
def release(self, args):
"""
Set the Out_Of_Service property to False - to release the I/O point back to
the controller's control.
:param args: String with <addr> <type> <inst>
"""
if not self._started:
raise ApplicationNotStarted("BACnet stack not running - use startApp()")
args = args.split()
addr, obj_type, obj_inst = args[:3]
try:
self.write("{} {} {} outOfService False".format(addr, obj_type, obj_inst))
except NoResponseFromController:
pass
try:
if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
raise OutOfServiceSet()
else:
pass # Everything is ok"
except NoResponseFromController:
pass |
def write_modes_to_file(self, filename="mode.dat", plot=True, analyse=True):
"""
Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will automatically be changed to
identify each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generated,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for).
"""
modes_directory = "./modes_semi_vec/"
if not os.path.isdir(modes_directory):
os.mkdir(modes_directory)
filename = modes_directory + filename
for i, mode in enumerate(self._ms.modes):
filename_mode = self._get_mode_filename(
self._semi_vectorial_method, i, filename
)
self._write_mode_to_file(np.real(mode), filename_mode)
if plot:
if i == 0 and analyse:
A, centre, sigma_2 = anal.fit_gaussian(
self._structure.xc, self._structure.yc, np.abs(mode)
)
subtitle = (
"E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
"MFD_{y} = %.3f"
) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
subtitle,
sigma_2[0],
sigma_2[1],
centre[0],
centre[1],
wavelength=self._structure._wl,
)
else:
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
wavelength=self._structure._wl,
)
return self.modes | Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will be automatically be changed to
identifiy each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for). | Below is the instruction that describes the task:
### Input:
Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will automatically be changed to
identify each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generated,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for).
### Response:
def write_modes_to_file(self, filename="mode.dat", plot=True, analyse=True):
"""
Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will automatically be changed to
identify each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generated,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for).
"""
modes_directory = "./modes_semi_vec/"
if not os.path.isdir(modes_directory):
os.mkdir(modes_directory)
filename = modes_directory + filename
for i, mode in enumerate(self._ms.modes):
filename_mode = self._get_mode_filename(
self._semi_vectorial_method, i, filename
)
self._write_mode_to_file(np.real(mode), filename_mode)
if plot:
if i == 0 and analyse:
A, centre, sigma_2 = anal.fit_gaussian(
self._structure.xc, self._structure.yc, np.abs(mode)
)
subtitle = (
"E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
"MFD_{y} = %.3f"
) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
subtitle,
sigma_2[0],
sigma_2[1],
centre[0],
centre[1],
wavelength=self._structure._wl,
)
else:
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
wavelength=self._structure._wl,
)
return self.modes |
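A minimal usage sketch for the method above. The solver object `mode_solver` is an assumption (an already-solved semi-vectorial mode solver); only the `write_modes_to_file` call itself mirrors the signature shown.
# Sketch: `mode_solver` is assumed to be a solved semi-vectorial mode solver.
modes = mode_solver.write_modes_to_file(filename="mode.dat", plot=True, analyse=True)
# Mode profiles are written under ./modes_semi_vec/ with a per-mode suffix;
# with plot=True a plot of each profile is produced as well.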
def _add_existing_weight(self, weight, trainable=None):
"""Calls add_weight() to register but not create an existing weight."""
if trainable is None: trainable = weight.trainable
self.add_weight(name=weight.name, shape=weight.shape, dtype=weight.dtype,
trainable=trainable, getter=lambda *_, **__: weight) | Calls add_weight() to register but not create an existing weight. | Below is the the instruction that describes the task:
### Input:
Calls add_weight() to register but not create an existing weight.
### Response:
def _add_existing_weight(self, weight, trainable=None):
"""Calls add_weight() to register but not create an existing weight."""
if trainable is None: trainable = weight.trainable
self.add_weight(name=weight.name, shape=weight.shape, dtype=weight.dtype,
trainable=trainable, getter=lambda *_, **__: weight) |
def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) | Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory | Below is the the instruction that describes the task:
### Input:
Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
### Response:
def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) |
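A short usage sketch for script_dir; the importing module name `pathutils` and the function `main` are purely illustrative.
# Sketch: assumes script_dir is importable from the module that defines it.
from pathutils import script_dir  # hypothetical module name

def main():
    print(script_dir(main))  # directory containing this script, symlinks resolved
    print(script_dir(main, follow_symlinks=False))

if __name__ == '__main__':
    main()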
def vm_monitoring(name, call=None):
'''
Returns the monitoring records for a given virtual machine. A VM name must be
supplied.
The monitoring information returned is a list of VM elements. Each VM element
contains the complete dictionary of the VM with the updated information returned
by the poll action.
.. versionadded:: 2016.3.0
name
The name of the VM for which to gather monitoring records.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_monitoring my-vm
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_monitoring action must be called with -a or --action.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.monitoring(auth, vm_id)
if response[0] is False:
log.error(
'There was an error retrieving the specified VM\'s monitoring '
'information.'
)
return {}
else:
info = {}
for vm_ in _get_xml(response[1]):
info[vm_.find('ID').text] = _xml_to_dict(vm_)
return info | Returns the monitoring records for a given virtual machine. A VM name must be
supplied.
The monitoring information returned is a list of VM elements. Each VM element
contains the complete dictionary of the VM with the updated information returned
by the poll action.
.. versionadded:: 2016.3.0
name
The name of the VM for which to gather monitoring records.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_monitoring my-vm | Below is the the instruction that describes the task:
### Input:
Returns the monitoring records for a given virtual machine. A VM name must be
supplied.
The monitoring information returned is a list of VM elements. Each VM element
contains the complete dictionary of the VM with the updated information returned
by the poll action.
.. versionadded:: 2016.3.0
name
The name of the VM for which to gather monitoring records.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_monitoring my-vm
### Response:
def vm_monitoring(name, call=None):
'''
Returns the monitoring records for a given virtual machine. A VM name must be
supplied.
The monitoring information returned is a list of VM elements. Each VM element
contains the complete dictionary of the VM with the updated information returned
by the poll action.
.. versionadded:: 2016.3.0
name
The name of the VM for which to gather monitoring records.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_monitoring my-vm
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_monitoring action must be called with -a or --action.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.monitoring(auth, vm_id)
if response[0] is False:
log.error(
'There was an error retrieving the specified VM\'s monitoring '
'information.'
)
return {}
else:
info = {}
for vm_ in _get_xml(response[1]):
info[vm_.find('ID').text] = _xml_to_dict(vm_)
return info |
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue. ' +
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs | returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect) | Below is the the instruction that describes the task:
### Input:
returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
### Response:
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue. ' +
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs |
def trace_in_process_link(self, link_bytes):
'''Creates a tracer for tracing asynchronous related processing in the same process.
For more information see :meth:`create_in_process_link`.
:param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`.
:rtype: tracers.InProcessLinkTracer
.. versionadded:: 1.1.0
'''
return tracers.InProcessLinkTracer(self._nsdk,
self._nsdk.trace_in_process_link(link_bytes)) | Creates a tracer for tracing asynchronous related processing in the same process.
For more information see :meth:`create_in_process_link`.
:param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`.
:rtype: tracers.InProcessLinkTracer
.. versionadded:: 1.1.0 | Below is the the instruction that describes the task:
### Input:
Creates a tracer for tracing asynchronous related processing in the same process.
For more information see :meth:`create_in_process_link`.
:param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`.
:rtype: tracers.InProcessLinkTracer
.. versionadded:: 1.1.0
### Response:
def trace_in_process_link(self, link_bytes):
'''Creates a tracer for tracing asynchronous related processing in the same process.
For more information see :meth:`create_in_process_link`.
:param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`.
:rtype: tracers.InProcessLinkTracer
.. versionadded:: 1.1.0
'''
return tracers.InProcessLinkTracer(self._nsdk,
self._nsdk.trace_in_process_link(link_bytes)) |
def ptb_raw_data(data_path):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
return train_data, valid_data, test_data, word_to_id | Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator. | Below is the the instruction that describes the task:
### Input:
Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
### Response:
def ptb_raw_data(data_path):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
return train_data, valid_data, test_data, word_to_id |
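A brief usage sketch, assuming simple-examples.tgz has already been extracted and the path below points at its data directory.
# The path is an assumption; it must contain ptb.train.txt, ptb.valid.txt and ptb.test.txt.
train_data, valid_data, test_data, word_to_id = ptb_raw_data("./simple-examples/data")
print("vocabulary size:", len(word_to_id))
print("first training ids:", train_data[:10])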
def displayhtml(public_key,
attrs,
use_ssl=False,
error=None):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)"""
error_param = ''
if error:
error_param = '&error=%s' % error
if use_ssl:
server = API_SSL_SERVER
else:
server = API_SERVER
if 'lang' not in attrs:
attrs['lang'] = get_language()[:2]
return render_to_string(
WIDGET_TEMPLATE,
{'api_server': server,
'public_key': public_key,
'error_param': error_param,
'lang': attrs['lang'],
'options': mark_safe(json.dumps(attrs, indent=2))
}) | Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code) | Below is the the instruction that describes the task:
### Input:
Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)
### Response:
def displayhtml(public_key,
attrs,
use_ssl=False,
error=None):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)"""
error_param = ''
if error:
error_param = '&error=%s' % error
if use_ssl:
server = API_SSL_SERVER
else:
server = API_SERVER
if 'lang' not in attrs:
attrs['lang'] = get_language()[:2]
return render_to_string(
WIDGET_TEMPLATE,
{'api_server': server,
'public_key': public_key,
'error_param': error_param,
'lang': attrs['lang'],
'options': mark_safe(json.dumps(attrs, indent=2))
}) |
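A hedged usage sketch; the public key is a placeholder and the attrs dict merely shows options being passed through to the widget template.
html = displayhtml(
    public_key="your-recaptcha-public-key",  # placeholder
    attrs={"theme": "clean"},                # 'lang' is filled in automatically
    use_ssl=True,
)
# `html` holds the rendered widget markup, ready to embed in a form.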
def _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag):
"""Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein name.
:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein name
"""
if 'name' in headerInfo:
proteinName = headerInfo['name']
else:
proteinName = headerInfo['id']
if isDecoy:
proteinName = ''.join((decoyTag, proteinName))
return proteinName | Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein name.
:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein name | Below is the the instruction that describes the task:
### Input:
Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein name.
:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein name
### Response:
def _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag):
"""Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein name.
:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein name
"""
if 'name' in headerInfo:
proteinName = headerInfo['name']
else:
proteinName = headerInfo['id']
if isDecoy:
proteinName = ''.join((decoyTag, proteinName))
return proteinName |
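A small worked example of the naming rule (header values are made up):
header = {'id': 'sp|P99999|EXAMPLE', 'name': 'EXAMPLE_PROTEIN'}
_nameFromHeaderInfo(header, False, '[rev]')           # -> 'EXAMPLE_PROTEIN' ('name' wins over 'id')
_nameFromHeaderInfo({'id': 'P99999'}, True, '[rev]')  # -> '[rev]P99999' (tag prepended for decoys)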
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str) | r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by generalizing this aspect. | Below is the the instruction that describes the task:
### Input:
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by generalizing this aspect.
### Response:
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str) |
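A sketch of calling the helper through SQLAlchemy; the connection string, schema, table and column names are placeholders.
from sqlalchemy import create_engine

engine = create_engine("postgresql://user:password@localhost:5432/mydb")  # hypothetical DSN
with engine.connect() as conn:
    create_empty_table_serial_primary(conn, "public", "measurements",
                                      columns=["p_mw", "q_mvar"], id_col="id")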
def to_api_repr(self):
"""Generate a resource for :meth:`_begin`."""
source_refs = [
{
"projectId": table.project,
"datasetId": table.dataset_id,
"tableId": table.table_id,
}
for table in self.sources
]
configuration = self._configuration.to_api_repr()
_helpers._set_sub_prop(configuration, ["copy", "sourceTables"], source_refs)
_helpers._set_sub_prop(
configuration,
["copy", "destinationTable"],
{
"projectId": self.destination.project,
"datasetId": self.destination.dataset_id,
"tableId": self.destination.table_id,
},
)
return {
"jobReference": self._properties["jobReference"],
"configuration": configuration,
} | Generate a resource for :meth:`_begin`. | Below is the the instruction that describes the task:
### Input:
Generate a resource for :meth:`_begin`.
### Response:
def to_api_repr(self):
"""Generate a resource for :meth:`_begin`."""
source_refs = [
{
"projectId": table.project,
"datasetId": table.dataset_id,
"tableId": table.table_id,
}
for table in self.sources
]
configuration = self._configuration.to_api_repr()
_helpers._set_sub_prop(configuration, ["copy", "sourceTables"], source_refs)
_helpers._set_sub_prop(
configuration,
["copy", "destinationTable"],
{
"projectId": self.destination.project,
"datasetId": self.destination.dataset_id,
"tableId": self.destination.table_id,
},
)
return {
"jobReference": self._properties["jobReference"],
"configuration": configuration,
} |
def set_asset(self, asset_id, asset_content_type=None):
"""stub"""
if asset_id is None:
raise NullArgument('asset_id cannot be None')
if not isinstance(asset_id, Id):
raise InvalidArgument('asset_id must be an instance of Id')
if asset_content_type is not None and not isinstance(asset_content_type, Type):
raise InvalidArgument('asset_content_type must be instance of Type')
if asset_content_type is None:
asset_content_type = ''
self.my_osid_object_form._my_map['fileId'] = {
'assetId': str(asset_id),
'assetContentTypeId': str(asset_content_type)
} | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def set_asset(self, asset_id, asset_content_type=None):
"""stub"""
if asset_id is None:
raise NullArgument('asset_id cannot be None')
if not isinstance(asset_id, Id):
raise InvalidArgument('asset_id must be an instance of Id')
if asset_content_type is not None and not isinstance(asset_content_type, Type):
raise InvalidArgument('asset_content_type must be instance of Type')
if asset_content_type is None:
asset_content_type = ''
self.my_osid_object_form._my_map['fileId'] = {
'assetId': str(asset_id),
'assetContentTypeId': str(asset_content_type)
} |
def run(self, data_loaders, workflow, max_epochs, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
max_epochs (int): Total training epochs.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
self.call_hook('before_run')
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
'runner has no method named "{}" to run an epoch'.
format(mode))
epoch_runner = getattr(self, mode)
elif callable(mode): # custom train()
epoch_runner = mode
else:
raise TypeError('mode in workflow must be a str or '
'callable function, not {}'.format(
type(mode)))
for _ in range(epochs):
if mode == 'train' and self.epoch >= max_epochs:
return
epoch_runner(data_loaders[i], **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run') | Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
max_epochs (int): Total training epochs. | Below is the the instruction that describes the task:
### Input:
Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
max_epochs (int): Total training epochs.
### Response:
def run(self, data_loaders, workflow, max_epochs, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
max_epochs (int): Total training epochs.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
self.call_hook('before_run')
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
'runner has no method named "{}" to run an epoch'.
format(mode))
epoch_runner = getattr(self, mode)
elif callable(mode): # custom train()
epoch_runner = mode
else:
raise TypeError('mode in workflow must be a str or '
'callable function, not {}'.format(
type(mode)))
for _ in range(epochs):
if mode == 'train' and self.epoch >= max_epochs:
return
epoch_runner(data_loaders[i], **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run') |
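A usage sketch mirroring the workflow example from the docstring; the runner instance, data loaders and epoch count are placeholders.
# Sketch: `runner` is an already-constructed runner, and the two loaders are
# ordinary DataLoader objects, one per workflow entry.
runner.run(
    data_loaders=[train_loader, val_loader],
    workflow=[('train', 2), ('val', 1)],   # 2 training epochs, then 1 validation epoch
    max_epochs=12,
)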
def Read(self, timeout=None):
'''
Reads the context menu
:param timeout: Optional. Any value other than None indicates a non-blocking read
:return:
'''
# if not self.Shown:
# self.Shown = True
# self.TrayIcon.show()
timeout1 = timeout
# if timeout1 == 0:
# timeout1 = 1
# if wx.GetApp():
# wx.GetApp().ProcessPendingEvents()
# self.App.ProcessPendingEvents()
# self.App.ProcessIdle()
# return self.MenuItemChosen
if timeout1 is not None:
try:
self.timer = wx.Timer(self.TaskBarIcon)
self.TaskBarIcon.Bind(wx.EVT_TIMER, self.timer_timeout)
self.timer.Start(milliseconds=timeout1, oneShot=wx.TIMER_ONE_SHOT)
except:
print('*** Got error in Read ***')
self.RunningMainLoop = True
self.App.MainLoop()
self.RunningMainLoop = False
if self.timer:
self.timer.Stop()
self.MenuItemChosen = self.TaskBarIcon.menu_item_chosen
return self.MenuItemChosen | Reads the context menu
:param timeout: Optional. Any value other than None indicates a non-blocking read
:return: | Below is the the instruction that describes the task:
### Input:
Reads the context menu
:param timeout: Optional. Any value other than None indicates a non-blocking read
:return:
### Response:
def Read(self, timeout=None):
'''
Reads the context menu
:param timeout: Optional. Any value other than None indicates a non-blocking read
:return:
'''
# if not self.Shown:
# self.Shown = True
# self.TrayIcon.show()
timeout1 = timeout
# if timeout1 == 0:
# timeout1 = 1
# if wx.GetApp():
# wx.GetApp().ProcessPendingEvents()
# self.App.ProcessPendingEvents()
# self.App.ProcessIdle()
# return self.MenuItemChosen
if timeout1 is not None:
try:
self.timer = wx.Timer(self.TaskBarIcon)
self.TaskBarIcon.Bind(wx.EVT_TIMER, self.timer_timeout)
self.timer.Start(milliseconds=timeout1, oneShot=wx.TIMER_ONE_SHOT)
except:
print('*** Got error in Read ***')
self.RunningMainLoop = True
self.App.MainLoop()
self.RunningMainLoop = False
if self.timer:
self.timer.Stop()
self.MenuItemChosen = self.TaskBarIcon.menu_item_chosen
return self.MenuItemChosen |
def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True):
""" Run a single training epoch """
self.train()
if interactive:
iterator = tqdm.tqdm(source.train_loader(), desc="Training", unit="iter", file=sys.stdout)
else:
iterator = source.train_loader()
for batch_idx, (data, target) in enumerate(iterator):
batch_info = BatchInfo(epoch_info, batch_idx)
batch_info.on_batch_begin()
self.train_batch(batch_info, data, target)
batch_info.on_batch_end()
iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) | Run a single training epoch | Below is the the instruction that describes the task:
### Input:
Run a single training epoch
### Response:
def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True):
""" Run a single training epoch """
self.train()
if interactive:
iterator = tqdm.tqdm(source.train_loader(), desc="Training", unit="iter", file=sys.stdout)
else:
iterator = source.train_loader()
for batch_idx, (data, target) in enumerate(iterator):
batch_info = BatchInfo(epoch_info, batch_idx)
batch_info.on_batch_begin()
self.train_batch(batch_info, data, target)
batch_info.on_batch_end()
iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) |
def from_proto(brain_param_proto):
"""
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object.
"""
resolution = [{
"height": x.height,
"width": x.width,
"blackAndWhite": x.gray_scale
} for x in brain_param_proto.camera_resolutions]
brain_params = BrainParameters(brain_param_proto.brain_name,
brain_param_proto.vector_observation_size,
brain_param_proto.num_stacked_vector_observations,
resolution,
list(brain_param_proto.vector_action_size),
list(brain_param_proto.vector_action_descriptions),
brain_param_proto.vector_action_space_type)
return brain_params | Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object. | Below is the the instruction that describes the task:
### Input:
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object.
### Response:
def from_proto(brain_param_proto):
"""
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object.
"""
resolution = [{
"height": x.height,
"width": x.width,
"blackAndWhite": x.gray_scale
} for x in brain_param_proto.camera_resolutions]
brain_params = BrainParameters(brain_param_proto.brain_name,
brain_param_proto.vector_observation_size,
brain_param_proto.num_stacked_vector_observations,
resolution,
list(brain_param_proto.vector_action_size),
list(brain_param_proto.vector_action_descriptions),
brain_param_proto.vector_action_space_type)
return brain_params |
def fulltext_scan_ids(self, query_id=None, query_fc=None,
preserve_order=True, indexes=None):
'''Fulltext search for identifiers.
Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.
Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.
:param str fname:
The feature to search.
:param unicode query:
The query.
:rtype: Iterable of ``(score, content_id)``
'''
it = self._fulltext_scan(query_id, query_fc, feature_names=False,
preserve_order=preserve_order,
indexes=indexes)
for hit in it:
yield hit['_score'], did(hit['_id']) | Fulltext search for identifiers.
Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.
Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.
:param str fname:
The feature to search.
:param unicode query:
The query.
:rtype: Iterable of ``(score, content_id)`` | Below is the the instruction that describes the task:
### Input:
Fulltext search for identifiers.
Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.
Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.
:param str fname:
The feature to search.
:param unicode query:
The query.
:rtype: Iterable of ``(score, content_id)``
### Response:
def fulltext_scan_ids(self, query_id=None, query_fc=None,
preserve_order=True, indexes=None):
'''Fulltext search for identifiers.
Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.
Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.
:param str fname:
The feature to search.
:param unicode query:
The query.
:rtype: Iterable of ``(score, content_id)``
'''
it = self._fulltext_scan(query_id, query_fc, feature_names=False,
preserve_order=preserve_order,
indexes=indexes)
for hit in it:
yield hit['_score'], did(hit['_id']) |
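A hedged sketch of consuming the results; `store` stands in for whatever object exposes this method and `query_fc` for a feature collection used as the query.
for score, content_id in store.fulltext_scan_ids(query_fc=query_fc, preserve_order=True):
    print(score, content_id)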
def read_notes_file(file_path):
"""
Returns the contents of a notes file.
If the notes file does not exist, None is returned
"""
if not os.path.isfile(file_path):
return None
with open(file_path, 'r', encoding=_default_encoding) as f:
return f.read() | Returns the contents of a notes file.
If the notes file does not exist, None is returned | Below is the the instruction that describes the task:
### Input:
Returns the contents of a notes file.
If the notes file does not exist, None is returned
### Response:
def read_notes_file(file_path):
"""
Returns the contents of a notes file.
If the notes file does not exist, None is returned
"""
if not os.path.isfile(file_path):
return None
with open(file_path, 'r', encoding=_default_encoding) as f:
return f.read() |
def _validate_measure_sampling(self, experiment):
"""Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment.
"""
# If shots=1 we should disable measure sampling.
# This is also required for statevector simulator to return the
# correct final statevector without silently dropping final measurements.
if self._shots <= 1:
self._sample_measure = False
return
# Check for config flag
if hasattr(experiment.config, 'allows_measure_sampling'):
self._sample_measure = experiment.config.allows_measure_sampling
# If flag isn't found do a simple test to see if a circuit contains
# no reset instructions, and no gates instructions after
# the first measure.
else:
measure_flag = False
for instruction in experiment.instructions:
# If circuit contains reset operations we cannot sample
if instruction.name == "reset":
self._sample_measure = False
return
# If circuit contains a measure option then we can
# sample only if all following operations are measures
if measure_flag:
# If we find a non-measure instruction
# we cannot do measure sampling
if instruction.name not in ["measure", "barrier", "id", "u0"]:
self._sample_measure = False
return
elif instruction.name == "measure":
measure_flag = True
# If we made it to the end of the circuit without returning
# measure sampling is allowed
self._sample_measure = True | Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment. | Below is the the instruction that describes the task:
### Input:
Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment.
### Response:
def _validate_measure_sampling(self, experiment):
"""Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment.
"""
# If shots=1 we should disable measure sampling.
# This is also required for statevector simulator to return the
# correct final statevector without silently dropping final measurements.
if self._shots <= 1:
self._sample_measure = False
return
# Check for config flag
if hasattr(experiment.config, 'allows_measure_sampling'):
self._sample_measure = experiment.config.allows_measure_sampling
# If flag isn't found do a simple test to see if a circuit contains
# no reset instructions, and no gates instructions after
# the first measure.
else:
measure_flag = False
for instruction in experiment.instructions:
# If circuit contains reset operations we cannot sample
if instruction.name == "reset":
self._sample_measure = False
return
# If circuit contains a measure option then we can
# sample only if all following operations are measures
if measure_flag:
# If we find a non-measure instruction
# we cannot do measure sampling
if instruction.name not in ["measure", "barrier", "id", "u0"]:
self._sample_measure = False
return
elif instruction.name == "measure":
measure_flag = True
# If we made it to the end of the circuit without returning
# measure sampling is allowed
self._sample_measure = True |
def is_active(self, name):
"""
Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None
"""
if name in self._plugins.keys():
return self._plugins[name].active
return None | Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None | Below is the the instruction that describes the task:
### Input:
Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None
### Response:
def is_active(self, name):
"""
Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None
"""
if name in self._plugins.keys():
return self._plugins[name].active
return None |
def get_items(self):
"""Get the items contained in `self`.
:return: the items contained.
:returntype: `list` of `DiscoItem`"""
ret=[]
l=self.xpath_ctxt.xpathEval("d:item")
if l is not None:
for i in l:
ret.append(DiscoItem(self, i))
return ret | Get the items contained in `self`.
:return: the items contained.
:returntype: `list` of `DiscoItem` | Below is the the instruction that describes the task:
### Input:
Get the items contained in `self`.
:return: the items contained.
:returntype: `list` of `DiscoItem`
### Response:
def get_items(self):
"""Get the items contained in `self`.
:return: the items contained.
:returntype: `list` of `DiscoItem`"""
ret=[]
l=self.xpath_ctxt.xpathEval("d:item")
if l is not None:
for i in l:
ret.append(DiscoItem(self, i))
return ret |
def _Pairs(data):
"""dictionary -> list of pairs"""
keys = sorted(data)
return [{'@key': k, '@value': data[k]} for k in keys] | dictionary -> list of pairs | Below is the the instruction that describes the task:
### Input:
dictionary -> list of pairs
### Response:
def _Pairs(data):
"""dictionary -> list of pairs"""
keys = sorted(data)
return [{'@key': k, '@value': data[k]} for k in keys] |
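A tiny worked example of the conversion:
_Pairs({'b': 2, 'a': 1})
# -> [{'@key': 'a', '@value': 1}, {'@key': 'b', '@value': 2}]   (keys sorted)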
def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the outer index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.as_matrix(),
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities | Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the outer index.
### Input:
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the outer index.
### Response:
def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the outer index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.as_matrix(),
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities |
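A hedged usage sketch; `model` stands for a fitted location choice model and the two DataFrames for chooser and alternative tables.
probs = model.probabilities(households, buildings)   # placeholder tables
# probs is a MultiIndex Series: outer level chooser_id, inner level alternative_id.
probs_for_one_chooser = probs.loc[chooser_id]        # probabilities across alternatives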
def get_book_progress(self, asin):
"""Returns the progress data available for a book.
NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.
Args:
asin: The asin of the book to be queried.
Returns:
A `ReadingProgress` instance corresponding to the book associated with
`asin`.
"""
kbp = self._get_api_call('get_book_progress', '"%s"' % asin)
return KindleCloudReaderAPI._kbp_to_progress(kbp) | Returns the progress data available for a book.
NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.
Args:
asin: The asin of the book to be queried.
Returns:
A `ReadingProgress` instance corresponding to the book associated with
`asin`. | Below is the the instruction that describes the task:
### Input:
Returns the progress data available for a book.
NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.
Args:
asin: The asin of the book to be queried.
Returns:
A `ReadingProgress` instance corresponding to the book associated with
`asin`.
### Response:
def get_book_progress(self, asin):
"""Returns the progress data available for a book.
NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.
Args:
asin: The asin of the book to be queried.
Returns:
A `ReadingProgress` instance corresponding to the book associated with
`asin`.
"""
kbp = self._get_api_call('get_book_progress', '"%s"' % asin)
return KindleCloudReaderAPI._kbp_to_progress(kbp) |
def query(dataset_key, query, query_type='sql', profile='default',
parameters=None, **kwargs):
"""Query an existing dataset
:param dataset_key: Dataset identifier, in the form of owner/id or of a url
:type dataset_key: str
:param query: SQL or SPARQL query
:type query: str
:param query_type: The type of the query. Must be either 'sql' or 'sparql'.
(Default value = 'sql')
:type query_type: {'sql', 'sparql'}, optional
:param parameters: parameters to the query - if SPARQL query, this should
be a dict containing named parameters, if SQL query, then this should
be a list containing positional parameters. Boolean values will be
converted to xsd:boolean, Integer values to xsd:integer, and other
Numeric values to xsd:decimal. anything else is treated as a String
literal (Default value = None)
:type parameters: query parameters, optional
:param profile: Configuration profile (account) to use.
(Default value = 'default')
:type profile: str, optional
:returns: Object containing the results of the query
:rtype: Results
:raises RuntimeError: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> results = dw.query(
... 'jonloyens/an-intro-to-dataworld-dataset',
... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` '
... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name')
>>> df = results.dataframe
>>> df.shape
(8, 6)
"""
return _get_instance(profile, **kwargs).query(dataset_key, query,
query_type=query_type,
parameters=parameters,
**kwargs) | Query an existing dataset
:param dataset_key: Dataset identifier, in the form of owner/id or of a url
:type dataset_key: str
:param query: SQL or SPARQL query
:type query: str
:param query_type: The type of the query. Must be either 'sql' or 'sparql'.
(Default value = 'sql')
:type query_type: {'sql', 'sparql'}, optional
:param parameters: parameters to the query - if SPARQL query, this should
be a dict containing named parameters, if SQL query, then this should
be a list containing positional parameters. Boolean values will be
converted to xsd:boolean, Integer values to xsd:integer, and other
Numeric values to xsd:decimal. anything else is treated as a String
literal (Default value = None)
:type parameters: query parameters, optional
:param profile: Configuration profile (account) to use.
(Default value = 'default')
:type profile: str, optional
:returns: Object containing the results of the query
:rtype: Results
:raises RuntimeError: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> results = dw.query(
... 'jonloyens/an-intro-to-dataworld-dataset',
... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` '
... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name')
>>> df = results.dataframe
>>> df.shape
(8, 6) | Below is the the instruction that describes the task:
### Input:
Query an existing dataset
:param dataset_key: Dataset identifier, in the form of owner/id or of a url
:type dataset_key: str
:param query: SQL or SPARQL query
:type query: str
:param query_type: The type of the query. Must be either 'sql' or 'sparql'.
(Default value = 'sql')
:type query_type: {'sql', 'sparql'}, optional
:param parameters: parameters to the query - if SPARQL query, this should
be a dict containing named parameters, if SQL query, then this should
be a list containing positional parameters. Boolean values will be
converted to xsd:boolean, Integer values to xsd:integer, and other
Numeric values to xsd:decimal. anything else is treated as a String
literal (Default value = None)
:type parameters: query parameters, optional
:param profile: Configuration profile (account) to use.
(Default value = 'default')
:type profile: str, optional
:returns: Object containing the results of the query
:rtype: Results
:raises RuntimeError: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> results = dw.query(
... 'jonloyens/an-intro-to-dataworld-dataset',
... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` '
... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name')
>>> df = results.dataframe
>>> df.shape
(8, 6)
### Response:
def query(dataset_key, query, query_type='sql', profile='default',
parameters=None, **kwargs):
"""Query an existing dataset
:param dataset_key: Dataset identifier, in the form of owner/id or of a url
:type dataset_key: str
:param query: SQL or SPARQL query
:type query: str
:param query_type: The type of the query. Must be either 'sql' or 'sparql'.
(Default value = 'sql')
:type query_type: {'sql', 'sparql'}, optional
:param parameters: parameters to the query - if SPARQL query, this should
be a dict containing named parameters, if SQL query, then this should
be a list containing positional parameters. Boolean values will be
converted to xsd:boolean, Integer values to xsd:integer, and other
Numeric values to xsd:decimal. anything else is treated as a String
literal (Default value = None)
:type parameters: query parameters, optional
:param profile: Configuration profile (account) to use.
(Default value = 'default')
:type profile: str, optional
:returns: Object containing the results of the query
:rtype: Results
:raises RuntimeError: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> results = dw.query(
... 'jonloyens/an-intro-to-dataworld-dataset',
... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` '
... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name')
>>> df = results.dataframe
>>> df.shape
(8, 6)
"""
return _get_instance(profile, **kwargs).query(dataset_key, query,
query_type=query_type,
parameters=parameters,
**kwargs) |
def download(self, local, remote):
"""
Performs synchronization from a remote file to a local file. The
remote path is the source and the local path is the destination.
"""
self.sync(RemoteFile(remote, self.api), LocalFile(local)) | Performs synchronization from a remote file to a local file. The
remote path is the source and the local path is the destination. | Below is the the instruction that describes the task:
### Input:
Performs synchronization from a remote file to a local file. The
remote path is the source and the local path is the destination.
### Response:
def download(self, local, remote):
"""
Performs synchronization from a remote file to a local file. The
remote path is the source and the local path is the destination.
"""
self.sync(RemoteFile(remote, self.api), LocalFile(local)) |
def setup(app):
"""Setup sphinx-gallery sphinx extension"""
app.add_config_value('plot_gallery', True, 'html')
app.add_config_value('abort_on_example_error', False, 'html')
app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html')
app.add_stylesheet('gallery.css')
app.connect('builder-inited', generate_gallery_rst)
app.connect('build-finished', embed_code_links) | Setup sphinx-gallery sphinx extension | Below is the the instruction that describes the task:
### Input:
Setup sphinx-gallery sphinx extension
### Response:
def setup(app):
"""Setup sphinx-gallery sphinx extension"""
app.add_config_value('plot_gallery', True, 'html')
app.add_config_value('abort_on_example_error', False, 'html')
app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html')
app.add_stylesheet('gallery.css')
app.connect('builder-inited', generate_gallery_rst)
app.connect('build-finished', embed_code_links) |
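For context, a sketch of the conf.py side these config values correspond to; the extension module path and the directory values are assumptions, while the option names come directly from the add_config_value calls above.
# conf.py (sketch)
extensions = ['sphinx_gallery.gen_gallery']   # assumed extension path
plot_gallery = True
abort_on_example_error = False
sphinx_gallery_conf = {
    'examples_dirs': '../examples',    # illustrative
    'gallery_dirs': 'auto_examples',   # illustrative
}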
def rmp_pixel_deg_xys(vecX, vecY, vecPrfSd, tplPngSize,
varExtXmin, varExtXmax, varExtYmin, varExtYmax):
"""Remap x, y, sigma parameters from pixel to degree.
Parameters
----------
vecX : 1D numpy array
Array with possible x parameters in pixels
vecY : 1D numpy array
Array with possible y parameters in pixels
vecPrfSd : 1D numpy array
Array with possible sd parameters in pixels
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : int
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parameters in degree
vecY : 1D numpy array
Array with possible y parameters in degree
vecPrfSd : 1D numpy array
Array with possible sd parameters in degree
"""
# Remap modelled x-positions of the pRFs:
vecXdgr = rmp_rng(vecX, varExtXmin, varExtXmax, varOldThrMin=0.0,
varOldAbsMax=(tplPngSize[0] - 1))
# Remap modelled y-positions of the pRFs:
vecYdgr = rmp_rng(vecY, varExtYmin, varExtYmax, varOldThrMin=0.0,
varOldAbsMax=(tplPngSize[1] - 1))
# We calculate the scaling factor from pixels to degrees of visual angle
# separately for the x- and the y-directions (the two should be the same).
varPix2DgrX = np.divide((varExtXmax - varExtXmin), tplPngSize[0])
varPix2DgrY = np.divide((varExtYmax - varExtYmin), tplPngSize[1])
# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
'stimulus space (in pixels) do not agree'
assert 0.5 > np.absolute((varPix2DgrX - varPix2DgrY)), strErrMsg
# Convert pRF sizes from pixels to degrees of visual angle
vecPrfSdDgr = np.multiply(vecPrfSd, varPix2DgrX)
# Return new values.
return vecXdgr, vecYdgr, vecPrfSdDgr | Remap x, y, sigma parameters from pixel to degree.
Parameters
----------
vecX : 1D numpy array
Array with possible x parameters in pixels
vecY : 1D numpy array
Array with possible y parameters in pixels
vecPrfSd : 1D numpy array
Array with possible sd parameters in pixels
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : int
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parametrs in degree
vecY : 1D numpy array
Array with possible y parametrs in degree
vecPrfSd : 1D numpy array
Array with possible sd parametrs in degree | Below is the the instruction that describes the task:
### Input:
Remap x, y, sigma parameters from pixel to degree.
Parameters
----------
vecX : 1D numpy array
Array with possible x parametrs in pixels
vecY : 1D numpy array
Array with possible y parametrs in pixels
vecPrfSd : 1D numpy array
Array with possible sd parametrs in pixels
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : int
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parametrs in degree
vecY : 1D numpy array
Array with possible y parametrs in degree
vecPrfSd : 1D numpy array
Array with possible sd parametrs in degree
### Response:
def rmp_pixel_deg_xys(vecX, vecY, vecPrfSd, tplPngSize,
varExtXmin, varExtXmax, varExtYmin, varExtYmax):
"""Remap x, y, sigma parameters from pixel to degree.
Parameters
----------
vecX : 1D numpy array
Array with possible x parametrs in pixels
vecY : 1D numpy array
Array with possible y parametrs in pixels
vecPrfSd : 1D numpy array
Array with possible sd parametrs in pixels
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : int
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parametrs in degree
vecY : 1D numpy array
Array with possible y parametrs in degree
vecPrfSd : 1D numpy array
Array with possible sd parametrs in degree
"""
# Remap modelled x-positions of the pRFs:
vecXdgr = rmp_rng(vecX, varExtXmin, varExtXmax, varOldThrMin=0.0,
varOldAbsMax=(tplPngSize[0] - 1))
# Remap modelled y-positions of the pRFs:
vecYdgr = rmp_rng(vecY, varExtYmin, varExtYmax, varOldThrMin=0.0,
varOldAbsMax=(tplPngSize[1] - 1))
# We calculate the scaling factor from pixels to degrees of visual angle
# separately for the x- and the y-directions (the two should be the same).
varPix2DgrX = np.divide((varExtXmax - varExtXmin), tplPngSize[0])
varPix2DgrY = np.divide((varExtYmax - varExtYmin), tplPngSize[1])
# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
'stimulus space (in pixels) do not agree'
assert 0.5 > np.absolute((varPix2DgrX - varPix2DgrY)), strErrMsg
# Convert pRF sizes from pixels to degrees of visual angle
vecPrfSdDgr = np.multiply(vecPrfSd, varPix2DgrX)
# Return new values.
return vecXdgr, vecYdgr, vecPrfSdDgr |
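The helper rmp_rng is not shown in this entry; the sketch below assumes it performs a plain linear remap from the pixel range onto the degree range, and illustrates the pixel-to-degree conversion on a hypothetical 200 x 200 pixel aperture spanning -5.19 to 5.19 degrees.
import numpy as np

def linear_remap(vals, new_min, new_max, old_max):
    # Assumed behaviour of rmp_rng: map [0, old_max] linearly onto [new_min, new_max]
    return new_min + (vals / old_max) * (new_max - new_min)

vecX_pix = np.array([0.0, 99.5, 199.0])            # pRF x-positions in pixels (hypothetical)
vecX_deg = linear_remap(vecX_pix, -5.19, 5.19, 199.0)
pix2deg = (5.19 - (-5.19)) / 200.0                 # degrees per pixel, as computed above
vecSd_deg = np.array([4.0, 8.0, 16.0]) * pix2deg   # pRF sizes converted from pixels to degrees
print(vecX_deg, vecSd_deg)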
def page(self, to=values.unset, from_=values.unset,
date_sent_before=values.unset, date_sent=values.unset,
date_sent_after=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of MessageInstance records from the API.
Request is executed immediately
:param unicode to: Filter by messages sent to this number
:param unicode from_: Filter by from number
:param datetime date_sent_before: Filter by date sent
:param datetime date_sent: Filter by date sent
:param datetime date_sent_after: Filter by date sent
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of MessageInstance
:rtype: twilio.rest.api.v2010.account.message.MessagePage
"""
params = values.of({
'To': to,
'From': from_,
'DateSent<': serialize.iso8601_datetime(date_sent_before),
'DateSent': serialize.iso8601_datetime(date_sent),
'DateSent>': serialize.iso8601_datetime(date_sent_after),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return MessagePage(self._version, response, self._solution) | Retrieve a single page of MessageInstance records from the API.
Request is executed immediately
:param unicode to: Filter by messages sent to this number
:param unicode from_: Filter by from number
:param datetime date_sent_before: Filter by date sent
:param datetime date_sent: Filter by date sent
:param datetime date_sent_after: Filter by date sent
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of MessageInstance
:rtype: twilio.rest.api.v2010.account.message.MessagePage | Below is the the instruction that describes the task:
### Input:
Retrieve a single page of MessageInstance records from the API.
Request is executed immediately
:param unicode to: Filter by messages sent to this number
:param unicode from_: Filter by from number
:param datetime date_sent_before: Filter by date sent
:param datetime date_sent: Filter by date sent
:param datetime date_sent_after: Filter by date sent
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of MessageInstance
:rtype: twilio.rest.api.v2010.account.message.MessagePage
### Response:
def page(self, to=values.unset, from_=values.unset,
date_sent_before=values.unset, date_sent=values.unset,
date_sent_after=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of MessageInstance records from the API.
Request is executed immediately
:param unicode to: Filter by messages sent to this number
:param unicode from_: Filter by from number
:param datetime date_sent_before: Filter by date sent
:param datetime date_sent: Filter by date sent
:param datetime date_sent_after: Filter by date sent
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of MessageInstance
:rtype: twilio.rest.api.v2010.account.message.MessagePage
"""
params = values.of({
'To': to,
'From': from_,
'DateSent<': serialize.iso8601_datetime(date_sent_before),
'DateSent': serialize.iso8601_datetime(date_sent),
'DateSent>': serialize.iso8601_datetime(date_sent_after),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return MessagePage(self._version, response, self._solution) |
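A usage sketch for the page() method above, assuming a configured twilio Client; the credentials, phone number, and dates are placeholders.
from datetime import datetime
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')  # placeholder credentials
page = client.messages.page(
    to='+15005550006',                         # placeholder number
    date_sent_after=datetime(2019, 1, 1),
    page_size=20,
)
for message in page:
    print(message.sid, message.status)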
def offset_random_rgb(seed, amount=1):
"""
Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized
offset from the seed.
:param seed:
:param amount:
:return:
"""
r, g, b = seed
results = []
for _ in range(amount):
base_val = ((r + g + b) / 3) + 1 # Add one to eliminate case where the base value would otherwise be 0
new_val = base_val + (random.random() * rgb_max_val / 5) # Randomly offset with an arbitrary multiplier
ratio = new_val / base_val
results.append((min(int(r*ratio), rgb_max_val), min(int(g*ratio), rgb_max_val), min(int(b*ratio), rgb_max_val)))
return results[0] if len(results) == 1 else results | Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized
offset from the seed.
:param seed:
:param amount:
:return: | Below is the the instruction that describes the task:
### Input:
Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized
offset from the seed.
:param seed:
:param amount:
:return:
### Response:
def offset_random_rgb(seed, amount=1):
"""
Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized
offset from the seed.
:param seed:
:param amount:
:return:
"""
r, g, b = seed
results = []
for _ in range(amount):
base_val = ((r + g + b) / 3) + 1 # Add one to eliminate case where the base value would otherwise be 0
new_val = base_val + (random.random() * rgb_max_val / 5) # Randomly offset with an arbitrary multiplier
ratio = new_val / base_val
results.append((min(int(r*ratio), rgb_max_val), min(int(g*ratio), rgb_max_val), min(int(b*ratio), rgb_max_val)))
return results[0] if len(results) == 1 else results
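A usage sketch for offset_random_rgb, assuming the function above is in scope together with the random import and a module-level rgb_max_val (taken here to be 255) that it relies on.
import random

rgb_max_val = 255                                         # assumed module-level constant
random.seed(42)                                           # make the sketch reproducible
variants = offset_random_rgb((120, 40, 200), amount=3)    # list of three offset colours
single = offset_random_rgb((120, 40, 200))                # amount=1 -> a single (r, g, b) tuple
print(variants)
print(single)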
def condition(condition=None, statement=None, _else=None, **kwargs):
"""
Run a statement if the input condition is checked and return the statement result.
:param condition: condition to check.
:type condition: str or dict
:param statement: statement to process if condition is checked.
:type statement: str or dict
:param _else: else statement.
:type _else: str or dict
:param kwargs: condition and statement additional parameters.
:return: statement result.
"""
result = None
checked = False
if condition is not None:
checked = run(condition, **kwargs)
if checked: # if condition is checked
if statement is not None: # process statement
result = run(statement, **kwargs)
elif _else is not None: # else process _else statement
result = run(_else, **kwargs)
return result | Run a statement if the input condition is checked and return the statement result.
:param condition: condition to check.
:type condition: str or dict
:param statement: statement to process if condition is checked.
:type statement: str or dict
:param _else: else statement.
:type _else: str or dict
:param kwargs: condition and statement additional parameters.
:return: statement result. | Below is the the instruction that describes the task:
### Input:
Run a statement if the input condition is checked and return the statement result.
:param condition: condition to check.
:type condition: str or dict
:param statement: statement to process if condition is checked.
:type statement: str or dict
:param _else: else statement.
:type _else: str or dict
:param kwargs: condition and statement additional parameters.
:return: statement result.
### Response:
def condition(condition=None, statement=None, _else=None, **kwargs):
"""
Run a statement if the input condition is checked and return the statement result.
:param condition: condition to check.
:type condition: str or dict
:param statement: statement to process if condition is checked.
:type statement: str or dict
:param _else: else statement.
:type _else: str or dict
:param kwargs: condition and statement additional parameters.
:return: statement result.
"""
result = None
checked = False
if condition is not None:
checked = run(condition, **kwargs)
if checked: # if condition is checked
if statement is not None: # process statement
result = run(statement, **kwargs)
elif _else is not None: # else process _else statement
result = run(_else, **kwargs)
return result |
def _build(self):
"""Connects the module to the graph.
Returns:
The learnable state, which has the same type, structure and shape as
the `initial_state` passed to the constructor.
"""
flat_initial_state = nest.flatten(self._initial_state)
if self._mask is not None:
flat_mask = nest.flatten(self._mask)
flat_learnable_state = [
_single_learnable_state(state, state_id=i, learnable=mask)
for i, (state, mask) in enumerate(zip(flat_initial_state, flat_mask))]
else:
flat_learnable_state = [_single_learnable_state(state, state_id=i)
for i, state in enumerate(flat_initial_state)]
return nest.pack_sequence_as(structure=self._initial_state,
flat_sequence=flat_learnable_state) | Connects the module to the graph.
Returns:
The learnable state, which has the same type, structure and shape as
the `initial_state` passed to the constructor. | Below is the the instruction that describes the task:
### Input:
Connects the module to the graph.
Returns:
The learnable state, which has the same type, structure and shape as
the `initial_state` passed to the constructor.
### Response:
def _build(self):
"""Connects the module to the graph.
Returns:
The learnable state, which has the same type, structure and shape as
the `initial_state` passed to the constructor.
"""
flat_initial_state = nest.flatten(self._initial_state)
if self._mask is not None:
flat_mask = nest.flatten(self._mask)
flat_learnable_state = [
_single_learnable_state(state, state_id=i, learnable=mask)
for i, (state, mask) in enumerate(zip(flat_initial_state, flat_mask))]
else:
flat_learnable_state = [_single_learnable_state(state, state_id=i)
for i, state in enumerate(flat_initial_state)]
return nest.pack_sequence_as(structure=self._initial_state,
flat_sequence=flat_learnable_state) |
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidFragment, self).init_widget()
f = self.fragment
f.setFragmentListener(f.getId())
f.onCreateView.connect(self.on_create_view)
f.onDestroyView.connect(self.on_destroy_view) | Initialize the underlying widget. | Below is the the instruction that describes the task:
### Input:
Initialize the underlying widget.
### Response:
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidFragment, self).init_widget()
f = self.fragment
f.setFragmentListener(f.getId())
f.onCreateView.connect(self.on_create_view)
f.onDestroyView.connect(self.on_destroy_view) |
def update(self, spec, document, upsert=False, manipulate=False,
safe=True, multi=False, callback=None, **kwargs):
"""Update a document(s) in this collection.
Raises :class:`TypeError` if either `spec` or `document` is
not an instance of ``dict`` or `upsert` is not an instance of
``bool``. If `safe` is ``True`` then the update will be
checked for errors, raising
:class:`~pymongo.errors.OperationFailure` if one
occurred. Safe updates require a response from the database,
while normal updates do not - thus, setting `safe` to ``True``
will negatively impact performance.
There are many useful `update modifiers`_ which can be used
when performing updates. For example, here we use the
``"$set"`` modifier to modify some fields in a matching
document:
.. doctest::
>>> db.test.insert({"x": "y", "a": "b"})
ObjectId('...')
>>> list(db.test.find())
[{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}]
>>> db.test.update({"x": "y"}, {"$set": {"a": "c"}})
>>> list(db.test.find())
[{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}]
If `safe` is ``True`` returns the response to the *lastError*
command. Otherwise, returns ``None``.
# Any additional keyword arguments imply ``safe=True``, and will
# be used as options for the resultant `getLastError`
# command. For example, to wait for replication to 3 nodes, pass
# ``w=3``.
:Parameters:
- `spec`: a ``dict`` or :class:`~bson.son.SON` instance
specifying elements which must be present for a document
to be updated
- `document`: a ``dict`` or :class:`~bson.son.SON`
instance specifying the document to be used for the update
or (in the case of an upsert) insert - see docs on MongoDB
`update modifiers`_
- `upsert` (optional): perform an upsert if ``True``
- `manipulate` (optional): manipulate the document before
updating? If ``True`` all instances of
:mod:`~pymongo.son_manipulator.SONManipulator` added to
this :class:`~pymongo.database.Database` will be applied
to the document before performing the update.
- `safe` (optional): check that the update succeeded?
- `multi` (optional): update all documents that match
`spec`, rather than just the first matching document. The
default value for `multi` is currently ``False``, but this
might eventually change to ``True``. It is recommended
that you specify this argument explicitly for all update
operations in order to prepare your code for that change.
- `**kwargs` (optional): any additional arguments imply
``safe=True``, and will be used as options for the
`getLastError` command
.. _update modifiers: http://www.mongodb.org/display/DOCS/Updating
.. mongodoc:: update
"""
if not isinstance(spec, dict):
raise TypeError("spec must be an instance of dict")
if not isinstance(document, dict):
raise TypeError("document must be an instance of dict")
if not isinstance(upsert, bool):
raise TypeError("upsert must be an instance of bool")
if not isinstance(safe, bool):
raise TypeError("safe must be an instance of bool")
# TODO: apply SON manipulators
# if upsert and manipulate:
# document = self.__database._fix_incoming(document, self)
if kwargs:
safe = True
if safe and not callable(callback):
raise TypeError("callback must be callable")
if not safe and callback is not None:
raise TypeError("callback can not be used with safe=False")
if callback:
callback = functools.partial(self._handle_response, orig_callback=callback)
self.__limit = None
connection = self.__pool.connection()
try:
connection.send_message(
message.update(self.full_collection_name, upsert, multi,
spec, document, safe, kwargs), callback=callback)
except:
connection.close()
raise | Update a document(s) in this collection.
Raises :class:`TypeError` if either `spec` or `document` is
not an instance of ``dict`` or `upsert` is not an instance of
``bool``. If `safe` is ``True`` then the update will be
checked for errors, raising
:class:`~pymongo.errors.OperationFailure` if one
occurred. Safe updates require a response from the database,
while normal updates do not - thus, setting `safe` to ``True``
will negatively impact performance.
There are many useful `update modifiers`_ which can be used
when performing updates. For example, here we use the
``"$set"`` modifier to modify some fields in a matching
document:
.. doctest::
>>> db.test.insert({"x": "y", "a": "b"})
ObjectId('...')
>>> list(db.test.find())
[{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}]
>>> db.test.update({"x": "y"}, {"$set": {"a": "c"}})
>>> list(db.test.find())
[{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}]
If `safe` is ``True`` returns the response to the *lastError*
command. Otherwise, returns ``None``.
# Any additional keyword arguments imply ``safe=True``, and will
# be used as options for the resultant `getLastError`
# command. For example, to wait for replication to 3 nodes, pass
# ``w=3``.
:Parameters:
- `spec`: a ``dict`` or :class:`~bson.son.SON` instance
specifying elements which must be present for a document
to be updated
- `document`: a ``dict`` or :class:`~bson.son.SON`
instance specifying the document to be used for the update
or (in the case of an upsert) insert - see docs on MongoDB
`update modifiers`_
- `upsert` (optional): perform an upsert if ``True``
- `manipulate` (optional): manipulate the document before
updating? If ``True`` all instances of
:mod:`~pymongo.son_manipulator.SONManipulator` added to
this :class:`~pymongo.database.Database` will be applied
to the document before performing the update.
- `safe` (optional): check that the update succeeded?
- `multi` (optional): update all documents that match
`spec`, rather than just the first matching document. The
default value for `multi` is currently ``False``, but this
might eventually change to ``True``. It is recommended
that you specify this argument explicitly for all update
operations in order to prepare your code for that change.
- `**kwargs` (optional): any additional arguments imply
``safe=True``, and will be used as options for the
`getLastError` command
.. _update modifiers: http://www.mongodb.org/display/DOCS/Updating
.. mongodoc:: update | Below is the the instruction that describes the task:
### Input:
Update a document(s) in this collection.
Raises :class:`TypeError` if either `spec` or `document` is
not an instance of ``dict`` or `upsert` is not an instance of
``bool``. If `safe` is ``True`` then the update will be
checked for errors, raising
:class:`~pymongo.errors.OperationFailure` if one
occurred. Safe updates require a response from the database,
while normal updates do not - thus, setting `safe` to ``True``
will negatively impact performance.
There are many useful `update modifiers`_ which can be used
when performing updates. For example, here we use the
``"$set"`` modifier to modify some fields in a matching
document:
.. doctest::
>>> db.test.insert({"x": "y", "a": "b"})
ObjectId('...')
>>> list(db.test.find())
[{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}]
>>> db.test.update({"x": "y"}, {"$set": {"a": "c"}})
>>> list(db.test.find())
[{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}]
If `safe` is ``True`` returns the response to the *lastError*
command. Otherwise, returns ``None``.
# Any additional keyword arguments imply ``safe=True``, and will
# be used as options for the resultant `getLastError`
# command. For example, to wait for replication to 3 nodes, pass
# ``w=3``.
:Parameters:
- `spec`: a ``dict`` or :class:`~bson.son.SON` instance
specifying elements which must be present for a document
to be updated
- `document`: a ``dict`` or :class:`~bson.son.SON`
instance specifying the document to be used for the update
or (in the case of an upsert) insert - see docs on MongoDB
`update modifiers`_
- `upsert` (optional): perform an upsert if ``True``
- `manipulate` (optional): manipulate the document before
updating? If ``True`` all instances of
:mod:`~pymongo.son_manipulator.SONManipulator` added to
this :class:`~pymongo.database.Database` will be applied
to the document before performing the update.
- `safe` (optional): check that the update succeeded?
- `multi` (optional): update all documents that match
`spec`, rather than just the first matching document. The
default value for `multi` is currently ``False``, but this
might eventually change to ``True``. It is recommended
that you specify this argument explicitly for all update
operations in order to prepare your code for that change.
- `**kwargs` (optional): any additional arguments imply
``safe=True``, and will be used as options for the
`getLastError` command
.. _update modifiers: http://www.mongodb.org/display/DOCS/Updating
.. mongodoc:: update
### Response:
def update(self, spec, document, upsert=False, manipulate=False,
safe=True, multi=False, callback=None, **kwargs):
"""Update a document(s) in this collection.
Raises :class:`TypeError` if either `spec` or `document` is
not an instance of ``dict`` or `upsert` is not an instance of
``bool``. If `safe` is ``True`` then the update will be
checked for errors, raising
:class:`~pymongo.errors.OperationFailure` if one
occurred. Safe updates require a response from the database,
while normal updates do not - thus, setting `safe` to ``True``
will negatively impact performance.
There are many useful `update modifiers`_ which can be used
when performing updates. For example, here we use the
``"$set"`` modifier to modify some fields in a matching
document:
.. doctest::
>>> db.test.insert({"x": "y", "a": "b"})
ObjectId('...')
>>> list(db.test.find())
[{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}]
>>> db.test.update({"x": "y"}, {"$set": {"a": "c"}})
>>> list(db.test.find())
[{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}]
If `safe` is ``True`` returns the response to the *lastError*
command. Otherwise, returns ``None``.
# Any additional keyword arguments imply ``safe=True``, and will
# be used as options for the resultant `getLastError`
# command. For example, to wait for replication to 3 nodes, pass
# ``w=3``.
:Parameters:
- `spec`: a ``dict`` or :class:`~bson.son.SON` instance
specifying elements which must be present for a document
to be updated
- `document`: a ``dict`` or :class:`~bson.son.SON`
instance specifying the document to be used for the update
or (in the case of an upsert) insert - see docs on MongoDB
`update modifiers`_
- `upsert` (optional): perform an upsert if ``True``
- `manipulate` (optional): manipulate the document before
updating? If ``True`` all instances of
:mod:`~pymongo.son_manipulator.SONManipulator` added to
this :class:`~pymongo.database.Database` will be applied
to the document before performing the update.
- `safe` (optional): check that the update succeeded?
- `multi` (optional): update all documents that match
`spec`, rather than just the first matching document. The
default value for `multi` is currently ``False``, but this
might eventually change to ``True``. It is recommended
that you specify this argument explicitly for all update
operations in order to prepare your code for that change.
- `**kwargs` (optional): any additional arguments imply
``safe=True``, and will be used as options for the
`getLastError` command
.. _update modifiers: http://www.mongodb.org/display/DOCS/Updating
.. mongodoc:: update
"""
if not isinstance(spec, dict):
raise TypeError("spec must be an instance of dict")
if not isinstance(document, dict):
raise TypeError("document must be an instance of dict")
if not isinstance(upsert, bool):
raise TypeError("upsert must be an instance of bool")
if not isinstance(safe, bool):
raise TypeError("safe must be an instance of bool")
# TODO: apply SON manipulators
# if upsert and manipulate:
# document = self.__database._fix_incoming(document, self)
if kwargs:
safe = True
if safe and not callable(callback):
raise TypeError("callback must be callable")
if not safe and callback is not None:
raise TypeError("callback can not be used with safe=False")
if callback:
callback = functools.partial(self._handle_response, orig_callback=callback)
self.__limit = None
connection = self.__pool.connection()
try:
connection.send_message(
message.update(self.full_collection_name, upsert, multi,
spec, document, safe, kwargs), callback=callback)
except:
connection.close()
raise |
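A callback-style usage sketch for the update() method above, assuming an asyncmongo-style client where db.test is an instance of this Collection; the callback signature is an assumption.
def on_update(response, error=None):
    # Invoked with the getLastError response once the safe update completes (signature assumed)
    if error:
        raise error
    print('update acknowledged:', response)

db.test.update(
    {"x": "y"},               # spec: match documents where x == "y"
    {"$set": {"a": "c"}},     # update modifier
    safe=True,
    callback=on_update,
)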
def _connect_signal(self, index):
"""Create signals for building indexes."""
post_save_signal = ElasticSignal(index, 'build')
post_save_signal.connect(post_save, sender=index.object_type)
self.signals.append(post_save_signal)
post_delete_signal = ElasticSignal(index, 'remove_object')
post_delete_signal.connect(post_delete, sender=index.object_type)
self.signals.append(post_delete_signal)
# Connect signals for all dependencies.
for dependency in index.get_dependencies():
# Automatically convert m2m fields to dependencies.
if isinstance(dependency, (models.ManyToManyField, ManyToManyDescriptor)):
dependency = ManyToManyDependency(dependency)
elif not isinstance(dependency, Dependency):
raise TypeError("Unsupported dependency type: {}".format(repr(dependency)))
signal = dependency.connect(index)
self.signals.extend(signal) | Create signals for building indexes. | Below is the the instruction that describes the task:
### Input:
Create signals for building indexes.
### Response:
def _connect_signal(self, index):
"""Create signals for building indexes."""
post_save_signal = ElasticSignal(index, 'build')
post_save_signal.connect(post_save, sender=index.object_type)
self.signals.append(post_save_signal)
post_delete_signal = ElasticSignal(index, 'remove_object')
post_delete_signal.connect(post_delete, sender=index.object_type)
self.signals.append(post_delete_signal)
# Connect signals for all dependencies.
for dependency in index.get_dependencies():
# Automatically convert m2m fields to dependencies.
if isinstance(dependency, (models.ManyToManyField, ManyToManyDescriptor)):
dependency = ManyToManyDependency(dependency)
elif not isinstance(dependency, Dependency):
raise TypeError("Unsupported dependency type: {}".format(repr(dependency)))
signal = dependency.connect(index)
self.signals.extend(signal) |
def tune(self):
"""XML node representing tune."""
if self._node.get('activities'):
tune = self._node['activities'].get('tune')
if type(tune) is collections.OrderedDict:
return tune
elif type(tune) is list:
return tune[0]
return tune
return None | XML node representing tune. | Below is the the instruction that describes the task:
### Input:
XML node representing tune.
### Response:
def tune(self):
"""XML node representing tune."""
if self._node.get('activities'):
tune = self._node['activities'].get('tune')
if type(tune) is collections.OrderedDict:
return tune
elif type(tune) is list:
return tune[0]
return tune
return None |
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event) | Reimplement Qt method | Below is the the instruction that describes the task:
### Input:
Reimplement Qt method
### Response:
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event) |
def add_inclusion(self, role, value):
"""Include item if `role` equals `value`
Attributes:
role (int): Qt role to compare `value` to
value (object): Value to include
"""
self._add_rule(self.includes, role, value) | Include item if `role` equals `value`
Attributes:
role (int): Qt role to compare `value` to
value (object): Value to include
### Input:
Include item if `role` equals `value`
Attributes:
role (int): Qt role to compare `value` to
value (object): Value to include
### Response:
def add_inclusion(self, role, value):
"""Include item if `role` equals `value`
Attributes:
role (int): Qt role to compare `value` to
value (object): Value to exclude
"""
self._add_rule(self.includes, role, value) |
def stdout_logging(loglevel=logging.INFO):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d: %(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S") | Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages | Below is the the instruction that describes the task:
### Input:
Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
### Response:
def stdout_logging(loglevel=logging.INFO):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d: %(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S") |
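A self-contained sketch of the configuration the helper applies: the same basicConfig call issued directly, followed by a test message.
import logging
import sys

logging.basicConfig(level=logging.DEBUG, stream=sys.stdout,
                    format="[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d: %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S")
logging.getLogger(__name__).info("log records now go to stdout")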
def get_primitive_structure(self, tolerance=0.25, use_site_props=False,
constrain_latt=None):
"""
This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found.
"""
if constrain_latt is None:
constrain_latt = []
def site_label(site):
if not use_site_props:
return site.species_string
else:
d = [site.species_string]
for k in sorted(site.properties.keys()):
d.append(k + "=" + str(site.properties[k]))
return ", ".join(d)
# group sites by species string
sites = sorted(self._sites, key=site_label)
grouped_sites = [
list(a[1])
for a in itertools.groupby(sites, key=site_label)]
grouped_fcoords = [np.array([s.frac_coords for s in g])
for g in grouped_sites]
# min_vecs are approximate periodicities of the cell. The exact
# periodicities from the supercell matrices are checked against these
# first
min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
min_vecs = min_fcoords - min_fcoords[0]
# fractional tolerance in the supercell
super_ftol = np.divide(tolerance, self.lattice.abc)
super_ftol_2 = super_ftol * 2
def pbc_coord_intersection(fc1, fc2, tol):
"""
Returns the fractional coords in fc1 that have coordinates
within tolerance to some coordinate in fc2
"""
d = fc1[:, None, :] - fc2[None, :, :]
d -= np.round(d)
np.abs(d, d)
return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]
# here we reduce the number of min_vecs by enforcing that every
# vector in min_vecs approximately maps each site onto a similar site.
# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
# reduction.
# This reduction is O(n^3) so usually is an improvement. Using double
# the tolerance because both vectors are approximate
for g in sorted(grouped_fcoords, key=lambda x: len(x)):
for f in g:
min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)
def get_hnf(fu):
"""
Returns all possible distinct supercell matrices given a
number of formula units in the supercell. Batches the matrices
by the values in the diagonal (for less numpy overhead).
Computational complexity is O(n^3), and difficult to improve.
Might be able to do something smart with checking combinations of a
and b first, though unlikely to reduce to O(n^2).
"""
def factors(n):
for i in range(1, n + 1):
if n % i == 0:
yield i
for det in factors(fu):
if det == 1:
continue
for a in factors(det):
for e in factors(det // a):
g = det // a // e
yield det, np.array(
[[[a, b, c], [0, e, f], [0, 0, g]]
for b, c, f in
itertools.product(range(a), range(a),
range(e))])
# we can't let sites match to their neighbors in the supercell
grouped_non_nbrs = []
for gfcoords in grouped_fcoords:
fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
fdist -= np.round(fdist)
np.abs(fdist, fdist)
non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
# since we want sites to match to themselves
np.fill_diagonal(non_nbrs, True)
grouped_non_nbrs.append(non_nbrs)
num_fu = functools.reduce(gcd, map(len, grouped_sites))
for size, ms in get_hnf(num_fu):
inv_ms = np.linalg.inv(ms)
# find sets of lattice vectors that are present in min_vecs
dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
dist -= np.round(dist)
np.abs(dist, dist)
is_close = np.all(dist < super_ftol, axis=-1)
any_close = np.any(is_close, axis=-1)
inds = np.all(any_close, axis=-1)
for inv_m, m in zip(inv_ms[inds], ms[inds]):
new_m = np.dot(inv_m, self.lattice.matrix)
ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))
valid = True
new_coords = []
new_sp = []
new_props = collections.defaultdict(list)
for gsites, gfcoords, non_nbrs in zip(grouped_sites,
grouped_fcoords,
grouped_non_nbrs):
all_frac = np.dot(gfcoords, m)
# calculate grouping of equivalent sites, represented by
# adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
groups = np.logical_and(close_in_prim, non_nbrs)
# check that groups are correct
if not np.all(np.sum(groups, axis=0) == size):
valid = False
break
# check that groups are all cliques
for g in groups:
if not np.all(groups[g][:, g]):
valid = False
break
if not valid:
break
# add the new sites, averaging positions
added = np.zeros(len(gsites))
new_fcoords = all_frac % 1
for i, group in enumerate(groups):
if not added[i]:
added[group] = True
inds = np.where(group)[0]
coords = new_fcoords[inds[0]]
for n, j in enumerate(inds[1:]):
offset = new_fcoords[j] - coords
coords += (offset - np.round(offset)) / (n + 2)
new_sp.append(gsites[inds[0]].species)
for k in gsites[inds[0]].properties:
new_props[k].append(gsites[inds[0]].properties[k])
new_coords.append(coords)
if valid:
inv_m = np.linalg.inv(m)
new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
s = Structure(new_l, new_sp, new_coords,
site_properties=new_props,
coords_are_cartesian=False)
# Default behavior
p = s.get_primitive_structure(
tolerance=tolerance, use_site_props=use_site_props,
constrain_latt=constrain_latt
).get_reduced_structure()
if not constrain_latt:
return p
# Only return primitive structures that
# satisfy the restriction condition
p_latt, s_latt = p.lattice, self.lattice
if type(constrain_latt).__name__ == "list":
if all([getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt]):
return p
elif type(constrain_latt).__name__ == "dict":
if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]):
return p
return self.copy() | This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found. | Below is the the instruction that describes the task:
### Input:
This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found.
### Response:
def get_primitive_structure(self, tolerance=0.25, use_site_props=False,
constrain_latt=None):
"""
This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found.
"""
if constrain_latt is None:
constrain_latt = []
def site_label(site):
if not use_site_props:
return site.species_string
else:
d = [site.species_string]
for k in sorted(site.properties.keys()):
d.append(k + "=" + str(site.properties[k]))
return ", ".join(d)
# group sites by species string
sites = sorted(self._sites, key=site_label)
grouped_sites = [
list(a[1])
for a in itertools.groupby(sites, key=site_label)]
grouped_fcoords = [np.array([s.frac_coords for s in g])
for g in grouped_sites]
# min_vecs are approximate periodicities of the cell. The exact
# periodicities from the supercell matrices are checked against these
# first
min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
min_vecs = min_fcoords - min_fcoords[0]
# fractional tolerance in the supercell
super_ftol = np.divide(tolerance, self.lattice.abc)
super_ftol_2 = super_ftol * 2
def pbc_coord_intersection(fc1, fc2, tol):
"""
Returns the fractional coords in fc1 that have coordinates
within tolerance to some coordinate in fc2
"""
d = fc1[:, None, :] - fc2[None, :, :]
d -= np.round(d)
np.abs(d, d)
return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]
# here we reduce the number of min_vecs by enforcing that every
# vector in min_vecs approximately maps each site onto a similar site.
# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
# reduction.
# This reduction is O(n^3) so usually is an improvement. Using double
# the tolerance because both vectors are approximate
for g in sorted(grouped_fcoords, key=lambda x: len(x)):
for f in g:
min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)
def get_hnf(fu):
"""
Returns all possible distinct supercell matrices given a
number of formula units in the supercell. Batches the matrices
by the values in the diagonal (for less numpy overhead).
Computational complexity is O(n^3), and difficult to improve.
Might be able to do something smart with checking combinations of a
and b first, though unlikely to reduce to O(n^2).
"""
def factors(n):
for i in range(1, n + 1):
if n % i == 0:
yield i
for det in factors(fu):
if det == 1:
continue
for a in factors(det):
for e in factors(det // a):
g = det // a // e
yield det, np.array(
[[[a, b, c], [0, e, f], [0, 0, g]]
for b, c, f in
itertools.product(range(a), range(a),
range(e))])
# we can't let sites match to their neighbors in the supercell
grouped_non_nbrs = []
for gfcoords in grouped_fcoords:
fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
fdist -= np.round(fdist)
np.abs(fdist, fdist)
non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
# since we want sites to match to themselves
np.fill_diagonal(non_nbrs, True)
grouped_non_nbrs.append(non_nbrs)
num_fu = functools.reduce(gcd, map(len, grouped_sites))
for size, ms in get_hnf(num_fu):
inv_ms = np.linalg.inv(ms)
# find sets of lattice vectors that are present in min_vecs
dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
dist -= np.round(dist)
np.abs(dist, dist)
is_close = np.all(dist < super_ftol, axis=-1)
any_close = np.any(is_close, axis=-1)
inds = np.all(any_close, axis=-1)
for inv_m, m in zip(inv_ms[inds], ms[inds]):
new_m = np.dot(inv_m, self.lattice.matrix)
ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))
valid = True
new_coords = []
new_sp = []
new_props = collections.defaultdict(list)
for gsites, gfcoords, non_nbrs in zip(grouped_sites,
grouped_fcoords,
grouped_non_nbrs):
all_frac = np.dot(gfcoords, m)
# calculate grouping of equivalent sites, represented by
# adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
groups = np.logical_and(close_in_prim, non_nbrs)
# check that groups are correct
if not np.all(np.sum(groups, axis=0) == size):
valid = False
break
# check that groups are all cliques
for g in groups:
if not np.all(groups[g][:, g]):
valid = False
break
if not valid:
break
# add the new sites, averaging positions
added = np.zeros(len(gsites))
new_fcoords = all_frac % 1
for i, group in enumerate(groups):
if not added[i]:
added[group] = True
inds = np.where(group)[0]
coords = new_fcoords[inds[0]]
for n, j in enumerate(inds[1:]):
offset = new_fcoords[j] - coords
coords += (offset - np.round(offset)) / (n + 2)
new_sp.append(gsites[inds[0]].species)
for k in gsites[inds[0]].properties:
new_props[k].append(gsites[inds[0]].properties[k])
new_coords.append(coords)
if valid:
inv_m = np.linalg.inv(m)
new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
s = Structure(new_l, new_sp, new_coords,
site_properties=new_props,
coords_are_cartesian=False)
# Default behavior
p = s.get_primitive_structure(
tolerance=tolerance, use_site_props=use_site_props,
constrain_latt=constrain_latt
).get_reduced_structure()
if not constrain_latt:
return p
# Only return primitive structures that
# satisfy the restriction condition
p_latt, s_latt = p.lattice, self.lattice
if type(constrain_latt).__name__ == "list":
if all([getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt]):
return p
elif type(constrain_latt).__name__ == "dict":
if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]):
return p
return self.copy() |
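A small usage sketch for get_primitive_structure() with pymatgen, assuming the package is installed; the toy simple-cubic Po cell below is arbitrary.
from pymatgen.core import Lattice, Structure

s = Structure(Lattice.cubic(3.35), ["Po"], [[0.0, 0.0, 0.0]])  # one-site cubic cell
s.make_supercell([2, 2, 2])                                     # inflate to 8 equivalent sites
prim = s.get_primitive_structure(tolerance=0.25)
print(len(s), "sites reduced to", len(prim), "site")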
def refresh(self):
""" Refresh/Obtain an account's data from the API server
"""
import re
if re.match(r"^1\.2\.[0-9]*$", self.identifier):
account = self.blockchain.rpc.get_objects([self.identifier])[0]
else:
account = self.blockchain.rpc.lookup_account_names([self.identifier])[0]
if not account:
raise AccountDoesNotExistsException(self.identifier)
self.store(account, "name")
if self.full: # pragma: no cover
accounts = self.blockchain.rpc.get_full_accounts([account["id"]], False)
if accounts and isinstance(accounts, list):
account = accounts[0][1]
else:
raise AccountDoesNotExistsException(self.identifier)
super(Account, self).__init__(
account["account"], blockchain_instance=self.blockchain
)
for k, v in account.items():
if k != "account":
self[k] = v
else:
super(Account, self).__init__(account, blockchain_instance=self.blockchain) | Refresh/Obtain an account's data from the API server | Below is the the instruction that describes the task:
### Input:
Refresh/Obtain an account's data from the API server
### Response:
def refresh(self):
""" Refresh/Obtain an account's data from the API server
"""
import re
if re.match(r"^1\.2\.[0-9]*$", self.identifier):
account = self.blockchain.rpc.get_objects([self.identifier])[0]
else:
account = self.blockchain.rpc.lookup_account_names([self.identifier])[0]
if not account:
raise AccountDoesNotExistsException(self.identifier)
self.store(account, "name")
if self.full: # pragma: no cover
accounts = self.blockchain.rpc.get_full_accounts([account["id"]], False)
if accounts and isinstance(accounts, list):
account = accounts[0][1]
else:
raise AccountDoesNotExistsException(self.identifier)
super(Account, self).__init__(
account["account"], blockchain_instance=self.blockchain
)
for k, v in account.items():
if k != "account":
self[k] = v
else:
super(Account, self).__init__(account, blockchain_instance=self.blockchain) |
def is_device(obj):
"""
Returns True if obj is a device type (derived from DeviceBase), but not defined in
:mod:`lewis.core.devices` or :mod:`lewis.devices`.
:param obj: Object to test.
:return: True if obj is a device type.
"""
return isinstance(obj, type) and issubclass(
obj, DeviceBase) and obj.__module__ not in ('lewis.devices', 'lewis.core.devices') | Returns True if obj is a device type (derived from DeviceBase), but not defined in
:mod:`lewis.core.devices` or :mod:`lewis.devices`.
:param obj: Object to test.
:return: True if obj is a device type. | Below is the the instruction that describes the task:
### Input:
Returns True if obj is a device type (derived from DeviceBase), but not defined in
:mod:`lewis.core.devices` or :mod:`lewis.devices`.
:param obj: Object to test.
:return: True if obj is a device type.
### Response:
def is_device(obj):
"""
Returns True if obj is a device type (derived from DeviceBase), but not defined in
:mod:`lewis.core.devices` or :mod:`lewis.devices`.
:param obj: Object to test.
:return: True if obj is a device type.
"""
return isinstance(obj, type) and issubclass(
obj, DeviceBase) and obj.__module__ not in ('lewis.devices', 'lewis.core.devices') |
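A tiny sketch for is_device(), assuming lewis is installed and that the function is importable from lewis.core.devices (the import location is an assumption).
from lewis.core.devices import DeviceBase, is_device

class FakeChopper(DeviceBase):
    """A user-defined device, defined outside lewis.core.devices."""

print(is_device(FakeChopper))  # expected True: a DeviceBase subclass from user code
print(is_device(DeviceBase))   # expected False: defined inside lewis.core.devices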
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse | Toggles direction of test
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Toggles direction of test
:rtype: bool
### Response:
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse |
def itemData(self, treeItem, column, role=Qt.DisplayRole):
""" Returns the data stored under the given role for the item. O
"""
if role == Qt.DisplayRole:
if column == self.COL_NODE_NAME:
return treeItem.nodeName
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_IS_OPEN:
# Only show for RTIs that actually open resources.
# TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?
if treeItem.hasChildren():
return str(treeItem.isOpen)
else:
return ""
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_EXCEPTION:
return str(treeItem.exception) if treeItem.exception else ''
else:
raise ValueError("Invalid column: {}".format(column))
elif role == Qt.ToolTipRole:
if treeItem.exception:
return str(treeItem.exception)
if column == self.COL_NODE_NAME:
return treeItem.nodePath # Also path when hovering over the name
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
else:
return None
else:
return super(RepoTreeModel, self).itemData(treeItem, column, role=role) | Returns the data stored under the given role for the item. O | Below is the the instruction that describes the task:
### Input:
Returns the data stored under the given role for the item. O
### Response:
def itemData(self, treeItem, column, role=Qt.DisplayRole):
""" Returns the data stored under the given role for the item. O
"""
if role == Qt.DisplayRole:
if column == self.COL_NODE_NAME:
return treeItem.nodeName
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_IS_OPEN:
# Only show for RTIs that actually open resources.
# TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?
if treeItem.hasChildren():
return str(treeItem.isOpen)
else:
return ""
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_EXCEPTION:
return str(treeItem.exception) if treeItem.exception else ''
else:
raise ValueError("Invalid column: {}".format(column))
elif role == Qt.ToolTipRole:
if treeItem.exception:
return str(treeItem.exception)
if column == self.COL_NODE_NAME:
return treeItem.nodePath # Also path when hovering over the name
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
else:
return None
else:
return super(RepoTreeModel, self).itemData(treeItem, column, role=role) |
def dBinaryRochedz(r, D, q, F):
"""
Computes a derivative of the potential with respect to z.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter
"""
return -r[2]*(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])**-1.5 -q*r[2]*((r[0]-D)*(r[0]-D)+r[1]*r[1]+r[2]*r[2])**-1.5 | Computes a derivative of the potential with respect to z.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter | Below is the the instruction that describes the task:
### Input:
Computes a derivative of the potential with respect to z.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter
### Response:
def dBinaryRochedz(r, D, q, F):
"""
Computes a derivative of the potential with respect to z.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter
"""
return -r[2]*(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])**-1.5 -q*r[2]*((r[0]-D)*(r[0]-D)+r[1]*r[1]+r[2]*r[2])**-1.5 |
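A self-contained numerical check of dBinaryRochedz; the expression is repeated from the entry above so the snippet runs standalone, and the sample point, separation D, and mass ratio q are arbitrary (F is accepted but unused by this derivative).
def dBinaryRochedz(r, D, q, F):
    # Same expression as above, restated so this check runs on its own
    return (-r[2] * (r[0]*r[0] + r[1]*r[1] + r[2]*r[2]) ** -1.5
            - q * r[2] * ((r[0] - D)*(r[0] - D) + r[1]*r[1] + r[2]*r[2]) ** -1.5)

val = dBinaryRochedz([0.3, 0.2, 0.1], D=1.0, q=0.5, F=1.0)
print(val)  # negative for z > 0, since both terms carry a factor of -z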
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if the file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
except IndexError:
raise ValueError('not a MicroManager TIFF file')
result = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32))
if summary_header != 2355492:
raise ValueError('invalid MicroManager summary header')
result['Summary'] = read_json(fh, byteorder, None, summary_length, None)
if index_header != 54773648:
raise ValueError('invalid MicroManager index header')
fh.seek(index_offset)
header, count = struct.unpack(byteorder + 'II', fh.read(8))
if header != 3453623:
raise ValueError('invalid MicroManager index header')
data = struct.unpack(byteorder + 'IIIII'*count, fh.read(20*count))
result['IndexMap'] = {'Channel': data[::5],
'Slice': data[1::5],
'Frame': data[2::5],
'Position': data[3::5],
'Offset': data[4::5]}
if display_header != 483765892:
raise ValueError('invalid MicroManager display header')
fh.seek(display_offset)
header, count = struct.unpack(byteorder + 'II', fh.read(8))
if header != 347834724:
raise ValueError('invalid MicroManager display header')
result['DisplaySettings'] = read_json(fh, byteorder, None, count, None)
if comments_header != 99384722:
raise ValueError('invalid MicroManager comments header')
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + 'II', fh.read(8))
if header != 84720485:
raise ValueError('invalid MicroManager comments header')
result['Comments'] = read_json(fh, byteorder, None, count, None)
return result | Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if the file does not contain valid MicroManager metadata. | Below is the the instruction that describes the task:
### Input:
Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if the file does not contain valid MicroManager metadata.
### Response:
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if the file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
raise ValueError('not a MicroManager TIFF file')
result = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32))
if summary_header != 2355492:
raise ValueError('invalid MicroManager summary header')
result['Summary'] = read_json(fh, byteorder, None, summary_length, None)
if index_header != 54773648:
raise ValueError('invalid MicroManager index header')
fh.seek(index_offset)
header, count = struct.unpack(byteorder + 'II', fh.read(8))
if header != 3453623:
raise ValueError('invalid MicroManager index header')
data = struct.unpack(byteorder + 'IIIII'*count, fh.read(20*count))
result['IndexMap'] = {'Channel': data[::5],
'Slice': data[1::5],
'Frame': data[2::5],
'Position': data[3::5],
'Offset': data[4::5]}
if display_header != 483765892:
raise ValueError('invalid MicroManager display header')
fh.seek(display_offset)
header, count = struct.unpack(byteorder + 'II', fh.read(8))
if header != 347834724:
raise ValueError('invalid MicroManager display header')
result['DisplaySettings'] = read_json(fh, byteorder, None, count, None)
if comments_header != 99384722:
raise ValueError('invalid MicroManager comments header')
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + 'II', fh.read(8))
if header != 84720485:
raise ValueError('invalid MicroManager comments header')
result['Comments'] = read_json(fh, byteorder, None, count, None)
return result |
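A hedged usage sketch for read_micromanager_metadata; the filename and the 'MicroManagerVersion' summary key are assumed stand-ins typical of MicroManager output, not taken from the source, and the helper read_json used inside the function must be importable alongside it.
# Hypothetical example: inspect a MicroManager TIFF without parsing its IFDs.
with open('MMStack_Pos0.ome.tif', 'rb') as fh:  # assumed filename
    try:
        meta = read_micromanager_metadata(fh)
    except ValueError as exc:
        print('not a MicroManager TIFF:', exc)
    else:
        print(meta['Summary'].get('MicroManagerVersion'))
        print(len(meta['IndexMap']['Offset']), 'frames indexed')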
def extract_command(outputdir, domain_methods, text_domain, keywords,
comment_tags, base_dir, project, version,
msgid_bugs_address):
"""Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
:arg base_dir: BASE_DIR setting
:arg project: PROJECT setting
:arg version: VERSION setting
:arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
"""
# Must monkeypatch first to fix i18n extensions stomping issues!
monkeypatch_i18n()
# Create the outputdir if it doesn't exist
outputdir = os.path.abspath(outputdir)
if not os.path.isdir(outputdir):
print('Creating output dir %s ...' % outputdir)
os.makedirs(outputdir)
domains = domain_methods.keys()
def callback(filename, method, options):
if method != 'ignore':
print(' %s' % filename)
# Extract string for each domain
for domain in domains:
print('Extracting all strings in domain %s...' % domain)
methods = domain_methods[domain]
catalog = Catalog(
header_comment='',
project=project,
version=version,
msgid_bugs_address=msgid_bugs_address,
charset='utf-8',
)
extracted = extract_from_dir(
base_dir,
method_map=methods,
options_map=generate_options_map(),
keywords=keywords,
comment_tags=comment_tags,
callback=callback,
)
for filename, lineno, msg, cmts, ctxt in extracted:
catalog.add(msg, None, [(filename, lineno)], auto_comments=cmts,
context=ctxt)
with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp:
write_po(fp, catalog, width=80)
print('Done') | Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
:arg base_dir: BASE_DIR setting
:arg project: PROJECT setting
:arg version: VERSION setting
:arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting | Below is the instruction that describes the task:
### Input:
Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
:arg base_dir: BASE_DIR setting
:arg project: PROJECT setting
:arg version: VERSION setting
:arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
### Response:
def extract_command(outputdir, domain_methods, text_domain, keywords,
comment_tags, base_dir, project, version,
msgid_bugs_address):
"""Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
:arg base_dir: BASE_DIR setting
:arg project: PROJECT setting
:arg version: VERSION setting
:arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
"""
# Must monkeypatch first to fix i18n extensions stomping issues!
monkeypatch_i18n()
# Create the outputdir if it doesn't exist
outputdir = os.path.abspath(outputdir)
if not os.path.isdir(outputdir):
print('Creating output dir %s ...' % outputdir)
os.makedirs(outputdir)
domains = domain_methods.keys()
def callback(filename, method, options):
if method != 'ignore':
print(' %s' % filename)
# Extract string for each domain
for domain in domains:
print('Extracting all strings in domain %s...' % domain)
methods = domain_methods[domain]
catalog = Catalog(
header_comment='',
project=project,
version=version,
msgid_bugs_address=msgid_bugs_address,
charset='utf-8',
)
extracted = extract_from_dir(
base_dir,
method_map=methods,
options_map=generate_options_map(),
keywords=keywords,
comment_tags=comment_tags,
callback=callback,
)
for filename, lineno, msg, cmts, ctxt in extracted:
catalog.add(msg, None, [(filename, lineno)], auto_comments=cmts,
context=ctxt)
with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp:
write_po(fp, catalog, width=80)
print('Done') |
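A hypothetical invocation of extract_command; every value below (paths, domain map, keywords, project metadata) is a stand-in chosen for illustration, and it assumes the module's other helpers (monkeypatch_i18n, generate_options_map) plus Babel's built-in 'python' and 'ignore' extraction methods are available.
# Illustrative call with made-up settings; adjust to the real project layout.
extract_command(
    outputdir='locale/templates/LC_MESSAGES',
    domain_methods={
        'django': [
            ('**/migrations/**.py', 'ignore'),
            ('**.py', 'python'),
        ],
    },
    text_domain='django',
    keywords={'_': None, 'gettext': None, 'ngettext': (1, 2)},
    comment_tags=('Translators:',),
    base_dir='.',
    project='example-project',
    version='1.0',
    msgid_bugs_address='bugs@example.com',
)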
def items(self, region_codes, include_subregions=False):
"""
Returns calendar classes for regions
:param region_codes list of ISO codes for selected regions
:param include_subregions boolean if subregions
of selected regions should be included in result
:rtype dict
:return dict where keys are ISO codes strings
and values are calendar classes
"""
items = OrderedDict()
for code in region_codes:
try:
items[code] = self.region_registry[code]
except KeyError:
continue
if include_subregions:
items.update(self.get_subregions(code))
return items | Returns calendar classes for regions
:param region_codes list of ISO codes for selected regions
:param include_subregions boolean if subregions
of selected regions should be included in result
:rtype dict
:return dict where keys are ISO codes strings
and values are calendar classes | Below is the instruction that describes the task:
### Input:
Returns calendar classes for regions
:param region_codes list of ISO codes for selected regions
:param include_subregions boolean if subregions
of selected regions should be included in result
:rtype dict
:return dict where keys are ISO codes strings
and values are calendar classes
### Response:
def items(self, region_codes, include_subregions=False):
"""
Returns calendar classes for regions
:param region_codes list of ISO codes for selected regions
:param include_subregions boolean if subregions
of selected regions should be included in result
:rtype dict
:return dict where keys are ISO codes strings
and values are calendar classes
"""
items = OrderedDict()
for code in region_codes:
try:
items[code] = self.region_registry[code]
except KeyError:
continue
if include_subregions:
items.update(self.get_subregions(code))
return items |
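A brief usage sketch; the registry instance and ISO codes below are hypothetical, and the method is assumed to live on a registry object exposing region_registry and get_subregions, as the body above implies.
# Hypothetical usage of a registry exposing the items() method above.
calendars = registry.items(['FR', 'CA', 'XX'], include_subregions=True)
for code, calendar_class in calendars.items():
    print(code, calendar_class.__name__)
# Unknown codes such as 'XX' are skipped silently by the KeyError branch.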
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
"""
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
    :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content', get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
"""
ctx_methods = OrderedDict(future_map)
fs = list(ctx_methods.values())
results = await asyncio.gather(*fs)
results = {
key: results[idx] for idx, key in enumerate(ctx_methods.keys())
}
return results | Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content', get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'} | Below is the instruction that describes the task:
### Input:
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content', get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
### Response:
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
"""
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
    :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content', get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
"""
ctx_methods = OrderedDict(future_map)
fs = list(ctx_methods.values())
results = await asyncio.gather(*fs)
results = {
key: results[idx] for idx, key in enumerate(ctx_methods.keys())
}
return results |
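A self-contained usage sketch for parallel_results; get_nav and get_content are made-up coroutines mirroring the docstring's example, not part of the source.
# Demonstration: two dummy coroutines gathered in parallel and keyed by name.
import asyncio

async def get_nav():
    return {1: 2}

async def get_content():
    return 'xyz'

async def main():
    results = await parallel_results((('nav', get_nav()), ('content', get_content())))
    print(results)  # {'nav': {1: 2}, 'content': 'xyz'}

asyncio.run(main())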