code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of ICON EUV files.
Notes
-----
Currently fixed to level-2
"""
desc = None
level = tag
if level == 'level_1':
code = 'L1'
desc = None
elif level == 'level_2':
code = 'L2'
desc = None
else:
raise ValueError('Unsupported level supplied: ' + level)
if format_str is None:
format_str = 'ICON_'+code+'_EUV_Daytime'
if desc is not None:
format_str += '_' + desc +'_'
format_str += '_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'
return pysat.Files.from_os(data_path=data_path,
format_str=format_str) | Produce a list of ICON EUV files.
Notes
-----
Currently fixed to level-2 | Below is the instruction that describes the task:
### Input:
Produce a list of ICON EUV files.
Notes
-----
Currently fixed to level-2
### Response:
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of ICON EUV files.
Notes
-----
Currently fixed to level-2
"""
desc = None
level = tag
if level == 'level_1':
code = 'L1'
desc = None
elif level == 'level_2':
code = 'L2'
desc = None
else:
raise ValueError('Unsupported level supplied: ' + level)
if format_str is None:
format_str = 'ICON_'+code+'_EUV_Daytime'
if desc is not None:
format_str += '_' + desc +'_'
format_str += '_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'
return pysat.Files.from_os(data_path=data_path,
format_str=format_str) |
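For orientation, with the default 'level_2' tag the code above resolves format_str to 'ICON_L2_EUV_Daytime_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'. A minimal call sketch, assuming pysat is importable and using a hypothetical data_path:
files = list_files(tag='level_2', data_path='/path/to/icon/euv')  # hypothetical local directory
# 'files' is whatever pysat.Files.from_os builds from names matching the pattern above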
def register_rml(self, filepath, **kwargs):
"""
Registers the filepath for an rml mapping
Args:
-----
filepath: the path to the rml file
"""
name = os.path.split(filepath)[-1]
if name in self.rml_maps and self.rml_maps[name] != filepath:
raise Exception("RML name already registered. Filenames must be "
"unique.",
(self.rml_maps[name], filepath))
self.rml_maps[name] = filepath | Registers the filepath for an rml mapping
Args:
-----
filepath: the path to the rml file | Below is the instruction that describes the task:
### Input:
Registers the filepath for an rml mapping
Args:
-----
filepath: the path to the rml file
### Response:
def register_rml(self, filepath, **kwargs):
"""
Registers the filepath for an rml mapping
Args:
-----
filepath: the path to the rml file
"""
name = os.path.split(filepath)[-1]
if name in self.rml_maps and self.rml_maps[name] != filepath:
raise Exception("RML name already registered. Filenames must be "
"unique.",
(self.rml_maps[name], filepath))
self.rml_maps[name] = filepath |
def __set_sheet_filenames(sheets, n):
"""
Use the dataset name to build the filenames in the sheets metadata
:param list sheets: Sheet metadata
:param str n: Dataset Name
:return list: Sheet metadata
"""
try:
for idx, sheet in enumerate(sheets):
try:
sheets[idx]["filename"] = "{}.{}".format(n, sheet["filename"])
except Exception as e:
logger_excel.error("set_sheet_filenames: inner: {}".format(e), exc_info=True)
except Exception as q:
logger_excel.error("set_sheet_filenames: outer: {}".format(q), exc_info=True)
return sheets | Use the dataset name to build the filenames in the sheets metadata
:param list sheets: Sheet metadata
:param str n: Dataset Name
:return list: Sheet metadata | Below is the instruction that describes the task:
### Input:
Use the dataset name to build the filenames in the sheets metadata
:param list sheets: Sheet metadata
:param str n: Dataset Name
:return list: Sheet metadata
### Response:
def __set_sheet_filenames(sheets, n):
"""
Use the dataset name to build the filenames in the sheets metadata
:param list sheets: Sheet metadata
:param str n: Dataset Name
:return list: Sheet metadata
"""
try:
for idx, sheet in enumerate(sheets):
try:
sheets[idx]["filename"] = "{}.{}".format(n, sheet["filename"])
except Exception as e:
logger_excel.error("set_sheet_filenames: inner: {}".format(e), exc_info=True)
except Exception as q:
logger_excel.error("set_sheet_filenames: outer: {}".format(q), exc_info=True)
return sheets |
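A small illustrative input and output pair, using hypothetical sheet metadata and dataset name; the logger_excel calls only fire when a sheet entry is malformed:
sheets = [{"filename": "chron1-measurement.csv"}]      # hypothetical sheet metadata
result = __set_sheet_filenames(sheets, "ODP1098B13")   # hypothetical dataset name
# result[0]["filename"] == "ODP1098B13.chron1-measurement.csv"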
def select_parser(self, request, parsers):
"""
Selects the appropriate parser which matches the request's content type.
:param request: The HTTP request.
:param parsers: The list of parsers.
:return: The parser selected or none.
"""
if not request.content_type:
return parsers[0], parsers[0].mimetype
mimetype = MimeType.parse(request.content_type)
for parser in parsers:
if mimetype.match(parser.mimetype):
return parser, mimetype
return None, None | Selects the appropriate parser which matches the request's content type.
:param request: The HTTP request.
:param parsers: The list of parsers.
:return: The parser selected or none. | Below is the instruction that describes the task:
### Input:
Selects the appropriate parser which matches the request's content type.
:param request: The HTTP request.
:param parsers: The list of parsers.
:return: The parser selected or none.
### Response:
def select_parser(self, request, parsers):
"""
Selects the appropriate parser which matches the request's content type.
:param request: The HTTP request.
:param parsers: The list of parsers.
:return: The parser selected or none.
"""
if not request.content_type:
return parsers[0], parsers[0].mimetype
mimetype = MimeType.parse(request.content_type)
for parser in parsers:
if mimetype.match(parser.mimetype):
return parser, mimetype
return None, None |
def validate_instance(cls, opts):
"""Validates an instance of global options for cases that are not prohibited via registration.
For example: mutually exclusive options may be registered by passing a `mutually_exclusive_group`,
but when multiple flags must be specified together, it can be necessary to specify post-parse
checks.
Raises pants.option.errors.OptionsError on validation failure.
"""
if opts.loop and (not opts.v2 or opts.v1):
raise OptionsError('The --loop option only works with @console_rules, and thus requires '
'`--v2 --no-v1` to function as expected.')
if opts.loop and not opts.enable_pantsd:
raise OptionsError('The --loop option requires `--enable-pantsd`, in order to watch files.')
if opts.v2_ui and not opts.v2:
raise OptionsError('The --v2-ui option requires --v2 to be enabled together.') | Validates an instance of global options for cases that are not prohibited via registration.
For example: mutually exclusive options may be registered by passing a `mutually_exclusive_group`,
but when multiple flags must be specified together, it can be necessary to specify post-parse
checks.
Raises pants.option.errors.OptionsError on validation failure. | Below is the instruction that describes the task:
### Input:
Validates an instance of global options for cases that are not prohibited via registration.
For example: mutually exclusive options may be registered by passing a `mutually_exclusive_group`,
but when multiple flags must be specified together, it can be necessary to specify post-parse
checks.
Raises pants.option.errors.OptionsError on validation failure.
### Response:
def validate_instance(cls, opts):
"""Validates an instance of global options for cases that are not prohibited via registration.
For example: mutually exclusive options may be registered by passing a `mutually_exclusive_group`,
but when multiple flags must be specified together, it can be necessary to specify post-parse
checks.
Raises pants.option.errors.OptionsError on validation failure.
"""
if opts.loop and (not opts.v2 or opts.v1):
raise OptionsError('The --loop option only works with @console_rules, and thus requires '
'`--v2 --no-v1` to function as expected.')
if opts.loop and not opts.enable_pantsd:
raise OptionsError('The --loop option requires `--enable-pantsd`, in order to watch files.')
if opts.v2_ui and not opts.v2:
raise OptionsError('The --v2-ui option requires --v2 to be enabled together.') |
def remove(cls, target, exclude=None, ctx=None, select=lambda *p: True):
"""Remove from target annotations which inherit from cls.
:param target: target from where remove annotations which inherits from
cls.
:param tuple/type exclude: annotation types to exclude from selection.
:param ctx: target ctx.
:param select: annotation selection function which takes in parameters
a target, a ctx and an annotation and return True if the annotation
has to be removed.
"""
# initialize exclude
exclude = () if exclude is None else exclude
try:
# get local annotations
local_annotations = get_local_property(
target, Annotation.__ANNOTATIONS_KEY__
)
except TypeError:
raise TypeError('target {0} must be hashable'.format(target))
# if there are local annotations
if local_annotations is not None:
# get annotations to remove which inherits from cls
annotations_to_remove = [
annotation for annotation in local_annotations
if (
isinstance(annotation, cls)
and not isinstance(annotation, exclude)
and select(target, ctx, annotation)
)
]
# and remove annotations from target
for annotation_to_remove in annotations_to_remove:
annotation_to_remove.remove_from(target) | Remove from target annotations which inherit from cls.
:param target: target from where remove annotations which inherits from
cls.
:param tuple/type exclude: annotation types to exclude from selection.
:param ctx: target ctx.
:param select: annotation selection function which takes in parameters
a target, a ctx and an annotation and return True if the annotation
has to be removed. | Below is the instruction that describes the task:
### Input:
Remove from target annotations which inherit from cls.
:param target: target from where remove annotations which inherits from
cls.
:param tuple/type exclude: annotation types to exclude from selection.
:param ctx: target ctx.
:param select: annotation selection function which takes in parameters
a target, a ctx and an annotation and return True if the annotation
has to be removed.
### Response:
def remove(cls, target, exclude=None, ctx=None, select=lambda *p: True):
"""Remove from target annotations which inherit from cls.
:param target: target from where remove annotations which inherits from
cls.
:param tuple/type exclude: annotation types to exclude from selection.
:param ctx: target ctx.
:param select: annotation selection function which takes in parameters
a target, a ctx and an annotation and return True if the annotation
has to be removed.
"""
# initialize exclude
exclude = () if exclude is None else exclude
try:
# get local annotations
local_annotations = get_local_property(
target, Annotation.__ANNOTATIONS_KEY__
)
except TypeError:
raise TypeError('target {0} must be hashable'.format(target))
# if there are local annotations
if local_annotations is not None:
# get annotations to remove which inherits from cls
annotations_to_remove = [
annotation for annotation in local_annotations
if (
isinstance(annotation, cls)
and not isinstance(annotation, exclude)
and select(target, ctx, annotation)
)
]
# and remove annotations from target
for annotation_to_remove in annotations_to_remove:
annotation_to_remove.remove_from(target) |
def search(term, provider=None):
"""Search for genomes that contain TERM in their name or description."""
for row in genomepy.search(term, provider):
print("\t".join([x.decode('utf-8', 'ignore') for x in row])) | Search for genomes that contain TERM in their name or description. | Below is the the instruction that describes the task:
### Input:
Search for genomes that contain TERM in their name or description.
### Response:
def search(term, provider=None):
"""Search for genomes that contain TERM in their name or description."""
for row in genomepy.search(term, provider):
print("\t".join([x.decode('utf-8', 'ignore') for x in row])) |
def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):
"""
Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new)
"""
if _CON_SYM_ in globals():
if not isinstance(globals()[_CON_SYM_], pdblp.BCon):
del globals()[_CON_SYM_]
if (_CON_SYM_ in globals()) and (not restart):
con = globals()[_CON_SYM_]
if getattr(con, '_session').start(): con.start()
return con, False
else:
con = pdblp.BCon(port=port, timeout=timeout)
globals()[_CON_SYM_] = con
con.start()
return con, True | Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new) | Below is the instruction that describes the task:
### Input:
Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new)
### Response:
def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):
"""
Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new)
"""
if _CON_SYM_ in globals():
if not isinstance(globals()[_CON_SYM_], pdblp.BCon):
del globals()[_CON_SYM_]
if (_CON_SYM_ in globals()) and (not restart):
con = globals()[_CON_SYM_]
if getattr(con, '_session').start(): con.start()
return con, False
else:
con = pdblp.BCon(port=port, timeout=timeout)
globals()[_CON_SYM_] = con
con.start()
return con, True |
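A usage sketch, assuming the pdblp package is installed, a Bloomberg session is reachable, and the module-level _PORT_ and _TIMEOUT_ defaults apply:
con, is_new = create_connection()          # creates and caches a BCon on first call
con2, _ = create_connection()              # reuses the cached connection (is_new is False)
con3, _ = create_connection(restart=True)  # forces a fresh BCon object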
def wysiwyg_editor(field_id, editor_name=None, config=None, editor_override=None):
"""
Turn the textarea #field_id into a rich editor. If you do not specify the
JavaScript name of the editor, it will be derived from the field_id.
If you don't specify the editor_name then you'll have a JavaScript object
named "<field_id>_editor" in the global namespace. We give you control of
this in case you have a complex JS environment.
"""
if not editor_name:
editor_name = "%s_editor" % field_id
ctx = {
'field_id': field_id,
'editor_name': editor_name,
'config': config
}
ctx.update(get_settings(editor_override=editor_override))
if editor_override is not None:
ctx['DJANGO_WYSIWYG_FLAVOR'] = editor_override
return render_to_string(
"django_wysiwyg/%s/editor_instance.html" % ctx['DJANGO_WYSIWYG_FLAVOR'],
ctx
) | Turn the textarea #field_id into a rich editor. If you do not specify the
JavaScript name of the editor, it will be derived from the field_id.
If you don't specify the editor_name then you'll have a JavaScript object
named "<field_id>_editor" in the global namespace. We give you control of
this in case you have a complex JS environment. | Below is the instruction that describes the task:
### Input:
Turn the textarea #field_id into a rich editor. If you do not specify the
JavaScript name of the editor, it will be derived from the field_id.
If you don't specify the editor_name then you'll have a JavaScript object
named "<field_id>_editor" in the global namespace. We give you control of
this in case you have a complex JS environment.
### Response:
def wysiwyg_editor(field_id, editor_name=None, config=None, editor_override=None):
"""
Turn the textarea #field_id into a rich editor. If you do not specify the
JavaScript name of the editor, it will be derived from the field_id.
If you don't specify the editor_name then you'll have a JavaScript object
named "<field_id>_editor" in the global namespace. We give you control of
this in case you have a complex JS environment.
"""
if not editor_name:
editor_name = "%s_editor" % field_id
ctx = {
'field_id': field_id,
'editor_name': editor_name,
'config': config
}
ctx.update(get_settings(editor_override=editor_override))
if editor_override is not None:
ctx['DJANGO_WYSIWYG_FLAVOR'] = editor_override
return render_to_string(
"django_wysiwyg/%s/editor_instance.html" % ctx['DJANGO_WYSIWYG_FLAVOR'],
ctx
) |
def load_fixture(fixture_file):
"""
Populate the database from a JSON file. Reads the JSON file FIXTURE_FILE
and uses it to populate the database. Fixture files should consist of a
dictionary mapping database names to arrays of objects to store in those
databases.
"""
utils.check_for_local_server()
local_url = config["local_server"]["url"]
server = Server(local_url)
fixture = json.load(fixture_file)
for db_name, _items in fixture.items():
db = server[db_name]
with click.progressbar(
_items, label=db_name, length=len(_items)
) as items:
for item in items:
item_id = item["_id"]
if item_id in db:
old_item = db[item_id]
item["_rev"] = old_item["_rev"]
if item == old_item:
continue
db[item_id] = item | Populate the database from a JSON file. Reads the JSON file FIXTURE_FILE
and uses it to populate the database. Fixture files should consist of a
dictionary mapping database names to arrays of objects to store in those
databases. | Below is the instruction that describes the task:
### Input:
Populate the database from a JSON file. Reads the JSON file FIXTURE_FILE
and uses it to populate the database. Fixture files should consist of a
dictionary mapping database names to arrays of objects to store in those
databases.
### Response:
def load_fixture(fixture_file):
"""
Populate the database from a JSON file. Reads the JSON file FIXTURE_FILE
and uses it to populate the database. Fixture files should consist of a
dictionary mapping database names to arrays of objects to store in those
databases.
"""
utils.check_for_local_server()
local_url = config["local_server"]["url"]
server = Server(local_url)
fixture = json.load(fixture_file)
for db_name, _items in fixture.items():
db = server[db_name]
with click.progressbar(
_items, label=db_name, length=len(_items)
) as items:
for item in items:
item_id = item["_id"]
if item_id in db:
old_item = db[item_id]
item["_rev"] = old_item["_rev"]
if item == old_item:
continue
db[item_id] = item |
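A sketch of the fixture layout the function expects, with hypothetical database and document names; each top-level key names a database and every item carries an "_id" so an existing document's "_rev" can be reused:
fixture = {
    "users": [
        {"_id": "user:alice", "name": "Alice"},
        {"_id": "user:bob", "name": "Bob"},
    ],
    "settings": [
        {"_id": "config:default", "theme": "dark"},
    ],
}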
def createSections(self):
"""Create the sections of the cell."""
self.soma = h.Section(name='soma', cell=self)
self.dend = h.Section(name='dend', cell=self) | Create the sections of the cell. | Below is the instruction that describes the task:
### Input:
Create the sections of the cell.
### Response:
def createSections(self):
"""Create the sections of the cell."""
self.soma = h.Section(name='soma', cell=self)
self.dend = h.Section(name='dend', cell=self) |
def _get_expr_variables(expression: z3.ExprRef) -> List[z3.ExprRef]:
"""
Gets the variables that make up the current expression
:param expression:
:return:
"""
result = []
if not expression.children() and not isinstance(expression, z3.BitVecNumRef):
result.append(expression)
for child in expression.children():
c_children = _get_expr_variables(child)
result.extend(c_children)
return result | Gets the variables that make up the current expression
:param expression:
:return: | Below is the instruction that describes the task:
### Input:
Gets the variables that make up the current expression
:param expression:
:return:
### Response:
def _get_expr_variables(expression: z3.ExprRef) -> List[z3.ExprRef]:
"""
Gets the variables that make up the current expression
:param expression:
:return:
"""
result = []
if not expression.children() and not isinstance(expression, z3.BitVecNumRef):
result.append(expression)
for child in expression.children():
c_children = _get_expr_variables(child)
result.extend(c_children)
return result |
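A quick sanity check of the traversal, assuming the standard z3 Python bindings:
import z3

x = z3.BitVec('x', 256)
y = z3.BitVec('y', 256)
expr = x + y + 5                    # the integer is coerced to a BitVecNumRef leaf
print(_get_expr_variables(expr))    # expected: [x, y]; numeric constants are filtered out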
def grantTablePermission(self, login, user, table, perm):
"""
Parameters:
- login
- user
- table
- perm
"""
self.send_grantTablePermission(login, user, table, perm)
self.recv_grantTablePermission() | Parameters:
- login
- user
- table
- perm | Below is the instruction that describes the task:
### Input:
Parameters:
- login
- user
- table
- perm
### Response:
def grantTablePermission(self, login, user, table, perm):
"""
Parameters:
- login
- user
- table
- perm
"""
self.send_grantTablePermission(login, user, table, perm)
self.recv_grantTablePermission() |
def validate_filters_or_records(filters_or_records):
"""Validation for filters_or_records variable from bulk_modify and bulk_delete"""
# If filters_or_records is empty, fail
if not filters_or_records:
raise ValueError('Must provide at least one filter tuples or Records')
# If filters_or_records is not list of Record or tuple, fail
if not isinstance(filters_or_records[0], (Record, tuple)):
raise ValueError('Cannot provide both filter tuples and Records')
# If filters_or_records is not list of either Record or only tuple, fail
_type = type(filters_or_records[0])
for item in filters_or_records:
if not isinstance(item, _type):
raise ValueError("Expected filter tuple or Record, received {0}".format(item))
return _type | Validation for filters_or_records variable from bulk_modify and bulk_delete | Below is the instruction that describes the task:
### Input:
Validation for filters_or_records variable from bulk_modify and bulk_delete
### Response:
def validate_filters_or_records(filters_or_records):
"""Validation for filters_or_records variable from bulk_modify and bulk_delete"""
# If filters_or_records is empty, fail
if not filters_or_records:
raise ValueError('Must provide at least one filter tuples or Records')
# If filters_or_records is not list of Record or tuple, fail
if not isinstance(filters_or_records[0], (Record, tuple)):
raise ValueError('Cannot provide both filter tuples and Records')
# If filters_or_records is not list of either Record or only tuple, fail
_type = type(filters_or_records[0])
for item in filters_or_records:
if not isinstance(item, _type):
raise ValueError("Expected filter tuple or Record, received {0}".format(item))
return _type |
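Illustrative calls, with hypothetical filter tuples; mixing tuples and Record objects in one list is rejected:
validate_filters_or_records([('Status', 'equals', 'Open')])   # returns the tuple type
validate_filters_or_records([])                                # raises ValueError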
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port | Return proxy address to connect to as tuple object | Below is the instruction that describes the task:
### Input:
Return proxy address to connect to as tuple object
### Response:
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port |
def evaluate_cached(self, **kwargs):
"""Wraps evaluate(), caching results"""
if not hasattr(self, 'result'):
self.result = self.evaluate(cache=True, **kwargs)
return self.result | Wraps evaluate(), caching results | Below is the instruction that describes the task:
### Input:
Wraps evaluate(), caching results
### Response:
def evaluate_cached(self, **kwargs):
"""Wraps evaluate(), caching results"""
if not hasattr(self, 'result'):
self.result = self.evaluate(cache=True, **kwargs)
return self.result |
def _generateForTokenSecurity(self,
username, password,
tokenUrl,
expiration=None,
client='requestip'):
""" generates a token for a feature service """
query_dict = {'username': username,
'password': password,
'expiration':str(_defaultTokenExpiration),
'client': client,
'f': 'json'}
if client == "referer":
query_dict['referer'] = self._referer_url
if expiration is not None:
query_dict['expiration'] = expiration
secHandler = None
if self.cookiejar is not None:
secHandler = self
if secHandler is not None:
secHandler._method = "HANDLER"
token = self._post(url=tokenUrl,
param_dict=query_dict,
securityHandler=secHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if self.cookiejar is not None:
if secHandler is not None:
secHandler._method = "TOKEN"
if 'error' in token:
self._token = None
self._token_created_on = None
self._token_expires_on = None
self._expires_in = None
return token
elif 'status' in token:
self._token = None
self._token_created_on = None
self._token_expires_on = None
self._expires_in = None
#print token['message']
return token
else:
self._token = token['token']
self._token_created_on = datetime.datetime.now()
self._token_expires_on = datetime.datetime.fromtimestamp(token['expires'] /1000) - \
datetime.timedelta(seconds=1)
self._expires_in = (self._token_expires_on - self._token_created_on).total_seconds()
return token['token'] | generates a token for a feature service | Below is the instruction that describes the task:
### Input:
generates a token for a feature service
### Response:
def _generateForTokenSecurity(self,
username, password,
tokenUrl,
expiration=None,
client='requestip'):
""" generates a token for a feature service """
query_dict = {'username': username,
'password': password,
'expiration':str(_defaultTokenExpiration),
'client': client,
'f': 'json'}
if client == "referer":
query_dict['referer'] = self._referer_url
if expiration is not None:
query_dict['expiration'] = expiration
secHandler = None
if self.cookiejar is not None:
secHandler = self
if secHandler is not None:
secHandler._method = "HANDLER"
token = self._post(url=tokenUrl,
param_dict=query_dict,
securityHandler=secHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if self.cookiejar is not None:
if secHandler is not None:
secHandler._method = "TOKEN"
if 'error' in token:
self._token = None
self._token_created_on = None
self._token_expires_on = None
self._expires_in = None
return token
elif 'status' in token:
self._token = None
self._token_created_on = None
self._token_expires_on = None
self._expires_in = None
#print token['message']
return token
else:
self._token = token['token']
self._token_created_on = datetime.datetime.now()
self._token_expires_on = datetime.datetime.fromtimestamp(token['expires'] /1000) - \
datetime.timedelta(seconds=1)
self._expires_in = (self._token_expires_on - self._token_created_on).total_seconds()
return token['token'] |
def create_halton_samples(order, dim=1, burnin=-1, primes=()):
"""
Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``.
"""
primes = list(primes)
if not primes:
prime_order = 10*dim
while len(primes) < dim:
primes = create_primes(prime_order)
prime_order *= 2
primes = primes[:dim]
assert len(primes) == dim, "not enough primes"
if burnin < 0:
burnin = max(primes)
out = numpy.empty((dim, order))
indices = [idx+burnin for idx in range(order)]
for dim_ in range(dim):
out[dim_] = create_van_der_corput_samples(
indices, number_base=primes[dim_])
return out | Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``. | Below is the instruction that describes the task:
### Input:
Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``.
### Response:
def create_halton_samples(order, dim=1, burnin=-1, primes=()):
"""
Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``.
"""
primes = list(primes)
if not primes:
prime_order = 10*dim
while len(primes) < dim:
primes = create_primes(prime_order)
prime_order *= 2
primes = primes[:dim]
assert len(primes) == dim, "not enough primes"
if burnin < 0:
burnin = max(primes)
out = numpy.empty((dim, order))
indices = [idx+burnin for idx in range(order)]
for dim_ in range(dim):
out[dim_] = create_van_der_corput_samples(
indices, number_base=primes[dim_])
return out |
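A usage sketch, assuming the companion helpers create_primes and create_van_der_corput_samples defined alongside this function are available:
import numpy

samples = create_halton_samples(order=8, dim=3)   # bases default to the primes 2, 3, 5
assert samples.shape == (3, 8)
# entries are low-discrepancy points strictly inside the unit interval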
def initialize_worker(self):
"""initialize the worker thread"""
worker_thread = threading.Thread(
name="WorkerThread", target=message_worker, args=(self,))
worker_thread.setDaemon(True)
worker_thread.start() | initialize the worker thread | Below is the instruction that describes the task:
### Input:
initialize the worker thread
### Response:
def initialize_worker(self):
"""initialize the worker thread"""
worker_thread = threading.Thread(
name="WorkerThread", target=message_worker, args=(self,))
worker_thread.setDaemon(True)
worker_thread.start() |
def ip_acl_ip_access_list_extended_hide_ip_acl_ext_seq_seq_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
ip = ET.SubElement(ip_acl, "ip")
access_list = ET.SubElement(ip, "access-list")
extended = ET.SubElement(access_list, "extended")
name_key = ET.SubElement(extended, "name")
name_key.text = kwargs.pop('name')
hide_ip_acl_ext = ET.SubElement(extended, "hide-ip-acl-ext")
seq = ET.SubElement(hide_ip_acl_ext, "seq")
seq_id = ET.SubElement(seq, "seq-id")
seq_id.text = kwargs.pop('seq_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ip_acl_ip_access_list_extended_hide_ip_acl_ext_seq_seq_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
ip = ET.SubElement(ip_acl, "ip")
access_list = ET.SubElement(ip, "access-list")
extended = ET.SubElement(access_list, "extended")
name_key = ET.SubElement(extended, "name")
name_key.text = kwargs.pop('name')
hide_ip_acl_ext = ET.SubElement(extended, "hide-ip-acl-ext")
seq = ET.SubElement(hide_ip_acl_ext, "seq")
seq_id = ET.SubElement(seq, "seq-id")
seq_id.text = kwargs.pop('seq_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def generic_type_args(type_: Type) -> List[Type]:
"""Gets the type argument list for the given generic type.
If you give this function List[int], it will return [int], and
if you give it Union[int, str] it will give you [int, str]. Note
that on Python < 3.7, Union[int, bool] collapses to Union[int] and
then to int; this is already done by the time this function is
called, so it does not help with that.
Args:
type_: The type to get the arguments list of.
Returns:
A list of Type objects.
"""
if hasattr(type_, '__union_params__'):
# 3.5 Union
return list(type_.__union_params__)
return list(type_.__args__) | Gets the type argument list for the given generic type.
If you give this function List[int], it will return [int], and
if you give it Union[int, str] it will give you [int, str]. Note
that on Python < 3.7, Union[int, bool] collapses to Union[int] and
then to int; this is already done by the time this function is
called, so it does not help with that.
Args:
type_: The type to get the arguments list of.
Returns:
A list of Type objects. | Below is the instruction that describes the task:
### Input:
Gets the type argument list for the given generic type.
If you give this function List[int], it will return [int], and
if you give it Union[int, str] it will give you [int, str]. Note
that on Python < 3.7, Union[int, bool] collapses to Union[int] and
then to int; this is already done by the time this function is
called, so it does not help with that.
Args:
type_: The type to get the arguments list of.
Returns:
A list of Type objects.
### Response:
def generic_type_args(type_: Type) -> List[Type]:
"""Gets the type argument list for the given generic type.
If you give this function List[int], it will return [int], and
if you give it Union[int, str] it will give you [int, str]. Note
that on Python < 3.7, Union[int, bool] collapses to Union[int] and
then to int; this is already done by the time this function is
called, so it does not help with that.
Args:
type_: The type to get the arguments list of.
Returns:
A list of Type objects.
"""
if hasattr(type_, '__union_params__'):
# 3.5 Union
return list(type_.__union_params__)
return list(type_.__args__) |
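Concrete values for the cases named in the docstring, as seen on modern Python 3 where __args__ is populated:
from typing import Dict, List, Union

generic_type_args(List[int])         # [int]
generic_type_args(Union[int, str])   # [int, str]
generic_type_args(Dict[str, float])  # [str, float]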
def get_nics(vm_, **kwargs):
'''
Return info about the network interfaces of a named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_nics <domain>
'''
conn = __get_conn(**kwargs)
nics = _get_nics(_get_domain(conn, vm_))
conn.close()
return nics | Return info about the network interfaces of a named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_nics <domain> | Below is the instruction that describes the task:
### Input:
Return info about the network interfaces of a named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_nics <domain>
### Response:
def get_nics(vm_, **kwargs):
'''
Return info about the network interfaces of a named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_nics <domain>
'''
conn = __get_conn(**kwargs)
nics = _get_nics(_get_domain(conn, vm_))
conn.close()
return nics |
def dumps(self):
"""Represent the environment as a string in LaTeX syntax.
Returns
-------
str
A LaTeX string representing the environment.
"""
content = self.dumps_content()
if not content.strip() and self.omit_if_empty:
return ''
string = ''
# Something other than None needs to be used as extra arguments, that
# way the options end up behind the latex_name argument.
if self.arguments is None:
extra_arguments = Arguments()
else:
extra_arguments = self.arguments
begin = Command('begin', self.start_arguments, self.options,
extra_arguments=extra_arguments)
begin.arguments._positional_args.insert(0, self.latex_name)
string += begin.dumps() + self.content_separator
string += content + self.content_separator
string += Command('end', self.latex_name).dumps()
return string | Represent the environment as a string in LaTeX syntax.
Returns
-------
str
A LaTeX string representing the environment. | Below is the instruction that describes the task:
### Input:
Represent the environment as a string in LaTeX syntax.
Returns
-------
str
A LaTeX string representing the environment.
### Response:
def dumps(self):
"""Represent the environment as a string in LaTeX syntax.
Returns
-------
str
A LaTeX string representing the environment.
"""
content = self.dumps_content()
if not content.strip() and self.omit_if_empty:
return ''
string = ''
# Something other than None needs to be used as extra arguments, that
# way the options end up behind the latex_name argument.
if self.arguments is None:
extra_arguments = Arguments()
else:
extra_arguments = self.arguments
begin = Command('begin', self.start_arguments, self.options,
extra_arguments=extra_arguments)
begin.arguments._positional_args.insert(0, self.latex_name)
string += begin.dumps() + self.content_separator
string += content + self.content_separator
string += Command('end', self.latex_name).dumps()
return string |
def initiate_upgrade_action_and_wait(self, components_mask, action,
timeout=2, interval=0.1):
""" Initiate Upgrade Action and wait for
long running command. """
try:
self.initiate_upgrade_action(components_mask, action)
except CompletionCodeError as e:
if e.cc == CC_LONG_DURATION_CMD_IN_PROGRESS:
self.wait_for_long_duration_command(
constants.CMDID_HPM_INITIATE_UPGRADE_ACTION,
timeout, interval)
else:
raise HpmError('initiate_upgrade_action CC=0x%02x' % e.cc) | Initiate Upgrade Action and wait for
long running command. | Below is the instruction that describes the task:
### Input:
Initiate Upgrade Action and wait for
long running command.
### Response:
def initiate_upgrade_action_and_wait(self, components_mask, action,
timeout=2, interval=0.1):
""" Initiate Upgrade Action and wait for
long running command. """
try:
self.initiate_upgrade_action(components_mask, action)
except CompletionCodeError as e:
if e.cc == CC_LONG_DURATION_CMD_IN_PROGRESS:
self.wait_for_long_duration_command(
constants.CMDID_HPM_INITIATE_UPGRADE_ACTION,
timeout, interval)
else:
raise HpmError('initiate_upgrade_action CC=0x%02x' % e.cc) |
def get_submissions(self, fullnames, *args, **kwargs):
"""Generate Submission objects for each item provided in `fullnames`.
A submission fullname looks like `t3_<base36_id>`. Submissions are
yielded in the same order they appear in `fullnames`.
Up to 100 items are batched at a time -- this happens transparently.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` and `limit` parameters cannot be
altered.
"""
fullnames = fullnames[:]
while fullnames:
cur = fullnames[:100]
fullnames[:100] = []
url = self.config['by_id'] + ','.join(cur)
for item in self.get_content(url, limit=len(cur), *args, **kwargs):
yield item | Generate Submission objects for each item provided in `fullnames`.
A submission fullname looks like `t3_<base36_id>`. Submissions are
yielded in the same order they appear in `fullnames`.
Up to 100 items are batched at a time -- this happens transparently.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` and `limit` parameters cannot be
altered. | Below is the instruction that describes the task:
### Input:
Generate Submission objects for each item provided in `fullnames`.
A submission fullname looks like `t3_<base36_id>`. Submissions are
yielded in the same order they appear in `fullnames`.
Up to 100 items are batched at a time -- this happens transparently.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` and `limit` parameters cannot be
altered.
### Response:
def get_submissions(self, fullnames, *args, **kwargs):
"""Generate Submission objects for each item provided in `fullnames`.
A submission fullname looks like `t3_<base36_id>`. Submissions are
yielded in the same order they appear in `fullnames`.
Up to 100 items are batched at a time -- this happens transparently.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` and `limit` parameters cannot be
altered.
"""
fullnames = fullnames[:]
while fullnames:
cur = fullnames[:100]
fullnames[:100] = []
url = self.config['by_id'] + ','.join(cur)
for item in self.get_content(url, limit=len(cur), *args, **kwargs):
yield item |
def _get_unique(self, *args):
"""Generate a unique value using the assigned maker"""
# Generate a unique values
value = ''
attempts = 0
while True:
attempts += 1
value = self._maker(*args)
if value not in self._used_values:
break
assert attempts < self._max_attempts, \
'Too many attempts to generate a unique value'
# Add the value to the set of used values
self._used_values.add(value)
return value | Generate a unique value using the assigned maker | Below is the instruction that describes the task:
### Input:
Generate a unique value using the assigned maker
### Response:
def _get_unique(self, *args):
"""Generate a unique value using the assigned maker"""
# Generate a unique values
value = ''
attempts = 0
while True:
attempts += 1
value = self._maker(*args)
if value not in self._used_values:
break
assert attempts < self._max_attempts, \
'Too many attempts to generate a unique value'
# Add the value to the set of used values
self._used_values.add(value)
return value |
def joinOn(self, model, onIndex):
"""
Performs an eqJoin with the given model. The resulting join will be
accessible through the model's name.
"""
return self._joinOnAsPriv(model, onIndex, model.__name__) | Performs an eqJoin with the given model. The resulting join will be
accessible through the model's name. | Below is the instruction that describes the task:
### Input:
Performs an eqJoin with the given model. The resulting join will be
accessible through the model's name.
### Response:
def joinOn(self, model, onIndex):
"""
Performs an eqJoin with the given model. The resulting join will be
accessible through the model's name.
"""
return self._joinOnAsPriv(model, onIndex, model.__name__) |
def debug_string(self, max_debug=MAX_DEBUG_TRIALS):
"""Returns a human readable message for printing to the console."""
messages = self._debug_messages()
states = collections.defaultdict(set)
limit_per_state = collections.Counter()
for t in self._trials:
states[t.status].add(t)
# Show at most max_debug total, but divide the limit fairly
while max_debug > 0:
start_num = max_debug
for s in states:
if limit_per_state[s] >= len(states[s]):
continue
max_debug -= 1
limit_per_state[s] += 1
if max_debug == start_num:
break
for local_dir in sorted({t.local_dir for t in self._trials}):
messages.append("Result logdir: {}".format(local_dir))
num_trials_per_state = {
state: len(trials)
for state, trials in states.items()
}
total_number_of_trials = sum(num_trials_per_state.values())
if total_number_of_trials > 0:
messages.append("Number of trials: {} ({})"
"".format(total_number_of_trials,
num_trials_per_state))
for state, trials in sorted(states.items()):
limit = limit_per_state[state]
messages.append("{} trials:".format(state))
sorted_trials = sorted(
trials, key=lambda t: _naturalize(t.experiment_tag))
if len(trials) > limit:
tail_length = limit // 2
first = sorted_trials[:tail_length]
for t in first:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
messages.append(
" ... {} not shown".format(len(trials) - tail_length * 2))
last = sorted_trials[-tail_length:]
for t in last:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
else:
for t in sorted_trials:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
return "\n".join(messages) + "\n" | Returns a human readable message for printing to the console. | Below is the the instruction that describes the task:
### Input:
Returns a human readable message for printing to the console.
### Response:
def debug_string(self, max_debug=MAX_DEBUG_TRIALS):
"""Returns a human readable message for printing to the console."""
messages = self._debug_messages()
states = collections.defaultdict(set)
limit_per_state = collections.Counter()
for t in self._trials:
states[t.status].add(t)
# Show at most max_debug total, but divide the limit fairly
while max_debug > 0:
start_num = max_debug
for s in states:
if limit_per_state[s] >= len(states[s]):
continue
max_debug -= 1
limit_per_state[s] += 1
if max_debug == start_num:
break
for local_dir in sorted({t.local_dir for t in self._trials}):
messages.append("Result logdir: {}".format(local_dir))
num_trials_per_state = {
state: len(trials)
for state, trials in states.items()
}
total_number_of_trials = sum(num_trials_per_state.values())
if total_number_of_trials > 0:
messages.append("Number of trials: {} ({})"
"".format(total_number_of_trials,
num_trials_per_state))
for state, trials in sorted(states.items()):
limit = limit_per_state[state]
messages.append("{} trials:".format(state))
sorted_trials = sorted(
trials, key=lambda t: _naturalize(t.experiment_tag))
if len(trials) > limit:
tail_length = limit // 2
first = sorted_trials[:tail_length]
for t in first:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
messages.append(
" ... {} not shown".format(len(trials) - tail_length * 2))
last = sorted_trials[-tail_length:]
for t in last:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
else:
for t in sorted_trials:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
return "\n".join(messages) + "\n" |
def dispense(self,
volume=None,
location=None,
rate=1.0):
"""
Dispense a volume of liquid (in microliters/uL) using this pipette
Notes
-----
If only a volume is passed, the pipette will dispense
from its current position. If only a location is passed,
`dispense` will default to its `current_volume`
The location may be a Well, or a specific position in relation to a
Well, such as `Well.top()`. If a Well is specified without calling
a position method (such as .top or .bottom), this method will default
to the bottom of the well.
Parameters
----------
volume : int or float
The number of microliters to dispense
(Default: self.current_volume)
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the dispense.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
rate : float
Set plunger speed for this dispense, where
speed = rate * dispense_speed (see :meth:`set_speed`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
# fill the pipette with liquid (200uL)
>>> p300.aspirate(plate[0]) # doctest: +SKIP
# dispense 50uL to a Well
>>> p300.dispense(50, plate[0]) # doctest: +SKIP
# dispense 50uL to the center of a well
>>> relative_vector = plate[1].center() # doctest: +SKIP
>>> p300.dispense(50, (plate[1], relative_vector)) # doctest: +SKIP
# dispense 20uL in place, at half the speed
>>> p300.dispense(20, rate=0.5) # doctest: +SKIP
# dispense the pipette's remaining volume (80uL) to a Well
>>> p300.dispense(plate[2]) # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot dispense without a tip attached.")
# Note: volume positional argument may not be passed. if it isn't then
# assume the first positional argument is the location
if not helpers.is_number(volume):
if volume and not location:
location = volume
volume = self.current_volume
# Ensure we don't dispense more than the current volume
volume = min(self.current_volume, volume)
display_location = location if location else self.previous_placeable
do_publish(self.broker, commands.dispense, self.dispense, 'before',
None, None, self, volume, display_location, rate)
# if volume is specified as 0uL, then do nothing
if volume != 0:
self._position_for_dispense(location)
mm_position = self._dispense_plunger_position(
self.current_volume - volume)
speed = self.speeds['dispense'] * rate
self.instrument_actuator.push_speed()
self.instrument_actuator.set_speed(speed)
self.instrument_actuator.set_active_current(self._plunger_current)
self.robot.poses = self.instrument_actuator.move(
self.robot.poses,
x=mm_position
)
self.instrument_actuator.pop_speed()
self.current_volume -= volume # update after actual dispense
do_publish(self.broker, commands.dispense, self.dispense, 'after',
self, None, self, volume, display_location, rate)
return self | Dispense a volume of liquid (in microliters/uL) using this pipette
Notes
-----
If only a volume is passed, the pipette will dispense
from its current position. If only a location is passed,
`dispense` will default to its `current_volume`
The location may be a Well, or a specific position in relation to a
Well, such as `Well.top()`. If a Well is specified without calling
a position method (such as .top or .bottom), this method will default
to the bottom of the well.
Parameters
----------
volume : int or float
The number of microliters to dispense
(Default: self.current_volume)
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the dispense.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
rate : float
Set plunger speed for this dispense, where
speed = rate * dispense_speed (see :meth:`set_speed`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
# fill the pipette with liquid (200uL)
>>> p300.aspirate(plate[0]) # doctest: +SKIP
# dispense 50uL to a Well
>>> p300.dispense(50, plate[0]) # doctest: +SKIP
# dispense 50uL to the center of a well
>>> relative_vector = plate[1].center() # doctest: +SKIP
>>> p300.dispense(50, (plate[1], relative_vector)) # doctest: +SKIP
# dispense 20uL in place, at half the speed
>>> p300.dispense(20, rate=0.5) # doctest: +SKIP
# dispense the pipette's remaining volume (80uL) to a Well
>>> p300.dispense(plate[2]) # doctest: +SKIP | Below is the instruction that describes the task:
### Input:
Dispense a volume of liquid (in microliters/uL) using this pipette
Notes
-----
If only a volume is passed, the pipette will dispense
from its current position. If only a location is passed,
`dispense` will default to its `current_volume`
The location may be a Well, or a specific position in relation to a
Well, such as `Well.top()`. If a Well is specified without calling
a position method (such as .top or .bottom), this method will default
to the bottom of the well.
Parameters
----------
volume : int or float
The number of microliters to dispense
(Default: self.current_volume)
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the dispense.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
rate : float
Set plunger speed for this dispense, where
speed = rate * dispense_speed (see :meth:`set_speed`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
# fill the pipette with liquid (200uL)
>>> p300.aspirate(plate[0]) # doctest: +SKIP
# dispense 50uL to a Well
>>> p300.dispense(50, plate[0]) # doctest: +SKIP
# dispense 50uL to the center of a well
>>> relative_vector = plate[1].center() # doctest: +SKIP
>>> p300.dispense(50, (plate[1], relative_vector)) # doctest: +SKIP
# dispense 20uL in place, at half the speed
>>> p300.dispense(20, rate=0.5) # doctest: +SKIP
# dispense the pipette's remaining volume (80uL) to a Well
>>> p300.dispense(plate[2]) # doctest: +SKIP
### Response:
def dispense(self,
volume=None,
location=None,
rate=1.0):
"""
Dispense a volume of liquid (in microliters/uL) using this pipette
Notes
-----
If only a volume is passed, the pipette will dispense
from its current position. If only a location is passed,
`dispense` will default to its `current_volume`
The location may be a Well, or a specific position in relation to a
Well, such as `Well.top()`. If a Well is specified without calling
a position method (such as .top or .bottom), this method will default
to the bottom of the well.
Parameters
----------
volume : int or float
The number of microliters to dispense
(Default: self.current_volume)
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the dispense.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
rate : float
Set plunger speed for this dispense, where
speed = rate * dispense_speed (see :meth:`set_speed`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
# fill the pipette with liquid (200uL)
>>> p300.aspirate(plate[0]) # doctest: +SKIP
# dispense 50uL to a Well
>>> p300.dispense(50, plate[0]) # doctest: +SKIP
# dispense 50uL to the center of a well
>>> relative_vector = plate[1].center() # doctest: +SKIP
>>> p300.dispense(50, (plate[1], relative_vector)) # doctest: +SKIP
# dispense 20uL in place, at half the speed
>>> p300.dispense(20, rate=0.5) # doctest: +SKIP
# dispense the pipette's remaining volume (80uL) to a Well
>>> p300.dispense(plate[2]) # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot dispense without a tip attached.")
# Note: volume positional argument may not be passed. if it isn't then
# assume the first positional argument is the location
if not helpers.is_number(volume):
if volume and not location:
location = volume
volume = self.current_volume
# Ensure we don't dispense more than the current volume
volume = min(self.current_volume, volume)
display_location = location if location else self.previous_placeable
do_publish(self.broker, commands.dispense, self.dispense, 'before',
None, None, self, volume, display_location, rate)
# if volume is specified as 0uL, then do nothing
if volume != 0:
self._position_for_dispense(location)
mm_position = self._dispense_plunger_position(
self.current_volume - volume)
speed = self.speeds['dispense'] * rate
self.instrument_actuator.push_speed()
self.instrument_actuator.set_speed(speed)
self.instrument_actuator.set_active_current(self._plunger_current)
self.robot.poses = self.instrument_actuator.move(
self.robot.poses,
x=mm_position
)
self.instrument_actuator.pop_speed()
self.current_volume -= volume # update after actual dispense
do_publish(self.broker, commands.dispense, self.dispense, 'after',
self, None, self, volume, display_location, rate)
return self |
def data(self, namespace):
"""
Gets the thread.local data (dict) for a given namespace.
Args:
namespace (string): The namespace, or key, of the data dict.
Returns:
(dict)
"""
assert namespace
if namespace in self._data:
return self._data[namespace]
new_data = {}
self._data[namespace] = new_data
return new_data | Gets the thread.local data (dict) for a given namespace.
Args:
namespace (string): The namespace, or key, of the data dict.
Returns:
(dict) | Below is the instruction that describes the task:
### Input:
Gets the thread.local data (dict) for a given namespace.
Args:
namespace (string): The namespace, or key, of the data dict.
Returns:
(dict)
### Response:
def data(self, namespace):
"""
Gets the thread.local data (dict) for a given namespace.
Args:
namespace (string): The namespace, or key, of the data dict.
Returns:
(dict)
"""
assert namespace
if namespace in self._data:
return self._data[namespace]
new_data = {}
self._data[namespace] = new_data
return new_data |
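A usage sketch on a hypothetical instance ('store') of the owning class:
bucket = store.data('render')            # first access creates and caches the dict
bucket['frame_count'] = 1
assert store.data('render') is bucket    # later calls return the same dict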
def task_failure_message(task_report):
"""Task failure message."""
trace_list = traceback.format_tb(task_report['traceback'])
body = 'Error: task failure\n\n'
body += 'Task ID: {}\n\n'.format(task_report['task_id'])
body += 'Archive: {}\n\n'.format(task_report['archive'])
body += 'Docker image: {}\n\n'.format(task_report['image'])
body += 'Exception: {}\n\n'.format(task_report['exception'])
body += 'Traceback:\n {} {}'.format(
string.join(trace_list[:-1], ''), trace_list[-1])
return body | Task failure message. | Below is the the instruction that describes the task:
### Input:
Task failure message.
### Response:
def task_failure_message(task_report):
"""Task failure message."""
trace_list = traceback.format_tb(task_report['traceback'])
body = 'Error: task failure\n\n'
body += 'Task ID: {}\n\n'.format(task_report['task_id'])
body += 'Archive: {}\n\n'.format(task_report['archive'])
body += 'Docker image: {}\n\n'.format(task_report['image'])
body += 'Exception: {}\n\n'.format(task_report['exception'])
body += 'Traceback:\n {} {}'.format(
string.join(trace_list[:-1], ''), trace_list[-1])
return body |
def grant_authority(self, column=None, value=None, **kwargs):
"""Many-to-many table connecting grants and authority."""
return self._resolve_call('GIC_GRANT_AUTH', column, value, **kwargs) | Many-to-many table connecting grants and authority. | Below is the the instruction that describes the task:
### Input:
Many-to-many table connecting grants and authority.
### Response:
def grant_authority(self, column=None, value=None, **kwargs):
"""Many-to-many table connecting grants and authority."""
return self._resolve_call('GIC_GRANT_AUTH', column, value, **kwargs) |
def load(cls, path_to_file):
"""
Loads the image data from a file on disk and tries to guess the image MIME type
:param path_to_file: path to the source file
:type path_to_file: str
:return: a `pyowm.image.Image` instance
"""
import mimetypes
mimetypes.init()
mime = mimetypes.guess_type('file://%s' % path_to_file)[0]
img_type = ImageTypeEnum.lookup_by_mime_type(mime)
with open(path_to_file, 'rb') as f:
data = f.read()
return Image(data, image_type=img_type) | Loads the image data from a file on disk and tries to guess the image MIME type
:param path_to_file: path to the source file
:type path_to_file: str
:return: a `pyowm.image.Image` instance | Below is the the instruction that describes the task:
### Input:
Loads the image data from a file on disk and tries to guess the image MIME type
:param path_to_file: path to the source file
:type path_to_file: str
:return: a `pyowm.image.Image` instance
### Response:
def load(cls, path_to_file):
"""
Loads the image data from a file on disk and tries to guess the image MIME type
:param path_to_file: path to the source file
:type path_to_file: str
:return: a `pyowm.image.Image` instance
"""
import mimetypes
mimetypes.init()
mime = mimetypes.guess_type('file://%s' % path_to_file)[0]
img_type = ImageTypeEnum.lookup_by_mime_type(mime)
with open(path_to_file, 'rb') as f:
data = f.read()
return Image(data, image_type=img_type) |
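A small stdlib-only sketch of the MIME-type guess performed above; the file path is hypothetical, and the file does not need to exist for guess_type to answer.

import mimetypes

mimetypes.init()
mime, _encoding = mimetypes.guess_type('file:///tmp/radar.png')
print(mime)  # 'image/png' -- this string is what gets mapped to an image-type enum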
def get_remote_revision(url, branch):
"""
GET REVISION OF A REMOTE BRANCH
"""
proc = Process("git remote revision", ["git", "ls-remote", url, "refs/heads/" + branch])
try:
while True:
raw_line = proc.stdout.pop()
line = raw_line.strip().decode('utf8')
if not line:
continue
return line.split("\t")[0]
finally:
try:
proc.join()
except Exception:
pass | GET REVISION OF A REMOTE BRANCH | Below is the the instruction that describes the task:
### Input:
GET REVISION OF A REMOTE BRANCH
### Response:
def get_remote_revision(url, branch):
"""
GET REVISION OF A REMOTE BRANCH
"""
proc = Process("git remote revision", ["git", "ls-remote", url, "refs/heads/" + branch])
try:
while True:
raw_line = proc.stdout.pop()
line = raw_line.strip().decode('utf8')
if not line:
continue
return line.split("\t")[0]
finally:
try:
proc.join()
except Exception:
pass |
def _get_content(cls, url, headers=HTTP_HEADERS):
"""
Get http content
:param url: contents url
:param headers: http header
:return: BeautifulSoup object
"""
session = requests.Session()
return session.get(url, headers=headers) | Get http content
:param url: contents url
:param headers: http header
:return: BeautifulSoup object | Below is the the instruction that describes the task:
### Input:
Get http content
:param url: contents url
:param headers: http header
:return: BeautifulSoup object
### Response:
def _get_content(cls, url, headers=HTTP_HEADERS):
"""
Get http content
:param url: contents url
:param headers: http header
:return: BeautifulSoup object
"""
session = requests.Session()
return session.get(url, headers=headers) |
def get_members(cls, session, team_or_id):
"""List the members for the team.
Args:
team_or_id (helpscout.models.Person or int): Team or the ID of
the team to get the folders for.
Returns:
RequestPaginator(output_type=helpscout.models.Users): Users
iterator.
"""
if isinstance(team_or_id, Person):
team_or_id = team_or_id.id
return cls(
'/teams/%d/members.json' % team_or_id,
session=session,
out_type=User,
) | List the members for the team.
Args:
team_or_id (helpscout.models.Person or int): Team or the ID of
        the team to get the members for.
Returns:
RequestPaginator(output_type=helpscout.models.Users): Users
iterator. | Below is the the instruction that describes the task:
### Input:
List the members for the team.
Args:
team_or_id (helpscout.models.Person or int): Team or the ID of
        the team to get the members for.
Returns:
RequestPaginator(output_type=helpscout.models.Users): Users
iterator.
### Response:
def get_members(cls, session, team_or_id):
"""List the members for the team.
Args:
team_or_id (helpscout.models.Person or int): Team or the ID of
                the team to get the members for.
Returns:
RequestPaginator(output_type=helpscout.models.Users): Users
iterator.
"""
if isinstance(team_or_id, Person):
team_or_id = team_or_id.id
return cls(
'/teams/%d/members.json' % team_or_id,
session=session,
out_type=User,
) |
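A hedged usage sketch for the paginator above; the Teams class name, the authenticated session object, and the numeric team id are assumptions for illustration only.

# Iterate every member of a team; each yielded item is a User model.
for user in Teams.get_members(session, team_or_id=1234):
    print(user.id)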
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):
"""Helper method to create a single post/get requests.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
url - A string URL
query_params - None or a dict
data - None or a string or a dict
send_as_file - A boolean, should the data be sent as a file.
Returns:
requests.PreparedRequest
Raises:
InvalidRequestError - if an invalid verb is passed in.
"""
# Prepare a set of kwargs to make it easier to avoid missing default params.
kwargs = {
'headers': self._default_headers,
'params': query_params,
'timeout': self._req_timeout,
}
if MultiRequest._VERB_POST == verb:
if send_as_file:
kwargs['files'] = {'file': data}
else:
kwargs['data'] = data
return PreparedRequest(partial(self._session.post, url, **kwargs), url)
elif MultiRequest._VERB_GET == verb:
return PreparedRequest(partial(self._session.get, url, **kwargs), url)
else:
raise InvalidRequestError('Invalid verb {0}'.format(verb)) | Helper method to create a single post/get requests.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
url - A string URL
query_params - None or a dict
data - None or a string or a dict
send_as_file - A boolean, should the data be sent as a file.
Returns:
requests.PreparedRequest
Raises:
InvalidRequestError - if an invalid verb is passed in. | Below is the the instruction that describes the task:
### Input:
Helper method to create a single post/get requests.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
url - A string URL
query_params - None or a dict
data - None or a string or a dict
send_as_file - A boolean, should the data be sent as a file.
Returns:
requests.PreparedRequest
Raises:
InvalidRequestError - if an invalid verb is passed in.
### Response:
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):
"""Helper method to create a single post/get requests.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
url - A string URL
query_params - None or a dict
data - None or a string or a dict
send_as_file - A boolean, should the data be sent as a file.
Returns:
requests.PreparedRequest
Raises:
InvalidRequestError - if an invalid verb is passed in.
"""
# Prepare a set of kwargs to make it easier to avoid missing default params.
kwargs = {
'headers': self._default_headers,
'params': query_params,
'timeout': self._req_timeout,
}
if MultiRequest._VERB_POST == verb:
if send_as_file:
kwargs['files'] = {'file': data}
else:
kwargs['data'] = data
return PreparedRequest(partial(self._session.post, url, **kwargs), url)
elif MultiRequest._VERB_GET == verb:
return PreparedRequest(partial(self._session.get, url, **kwargs), url)
else:
raise InvalidRequestError('Invalid verb {0}'.format(verb)) |
def _getStrandType(self, strand):
"""
:param strand:
:return:
"""
# TODO make this a dictionary/enum: PLUS, MINUS, BOTH, UNKNOWN
strand_id = None
if strand == '+':
strand_id = self.globaltt['plus_strand']
elif strand == '-':
strand_id = self.globaltt['minus_strand']
elif strand == '.':
strand_id = self.globaltt['both_strand']
elif strand is None: # assume this is Unknown
pass
else:
LOG.warning("strand type could not be mapped: %s", str(strand))
return strand_id | :param strand:
:return: | Below is the the instruction that describes the task:
### Input:
:param strand:
:return:
### Response:
def _getStrandType(self, strand):
"""
:param strand:
:return:
"""
# TODO make this a dictionary/enum: PLUS, MINUS, BOTH, UNKNOWN
strand_id = None
if strand == '+':
strand_id = self.globaltt['plus_strand']
elif strand == '-':
strand_id = self.globaltt['minus_strand']
elif strand == '.':
strand_id = self.globaltt['both_strand']
elif strand is None: # assume this is Unknown
pass
else:
LOG.warning("strand type could not be mapped: %s", str(strand))
return strand_id |
def put(consul_url=None, token=None, key=None, value=None, **kwargs):
'''
Put values into Consul
:param consul_url: The Consul server URL.
:param key: The key to use as the starting point for the list.
:param value: The value to set the key to.
:param flags: This can be used to specify an unsigned value
between 0 and 2^64-1. Clients can choose to use
this however makes sense for their application.
:param cas: This flag is used to turn the PUT into a
Check-And-Set operation.
:param acquire: This flag is used to turn the PUT into a
lock acquisition operation.
:param release: This flag is used to turn the PUT into a
lock release operation.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.put key='web/key1' value="Hello there"
salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'
salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'
'''
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
if not key:
raise SaltInvocationError('Required argument "key" is missing.')
    # It is invalid to specify these together
conflicting_args = ['cas', 'release', 'acquire']
for _l1 in conflicting_args:
for _l2 in conflicting_args:
if _l1 in kwargs and _l2 in kwargs and _l1 != _l2:
raise SaltInvocationError('Using arguments `{0}` and `{1}`'
' together is invalid.'.format(_l1, _l2))
query_params = {}
available_sessions = session_list(consul_url=consul_url, return_list=True)
_current = get(consul_url=consul_url, key=key)
if 'flags' in kwargs:
if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64:
query_params['flags'] = kwargs['flags']
if 'cas' in kwargs:
if _current['res']:
if kwargs['cas'] == 0:
ret['message'] = ('Key {0} exists, index '
'must be non-zero.'.format(key))
ret['res'] = False
return ret
if kwargs['cas'] != _current['data']['ModifyIndex']:
ret['message'] = ('Key {0} exists, but indexes '
'do not match.'.format(key))
ret['res'] = False
return ret
query_params['cas'] = kwargs['cas']
else:
            ret['message'] = ('Key {0} does not exist, '
'CAS argument can not be used.'.format(key))
ret['res'] = False
return ret
if 'acquire' in kwargs:
if kwargs['acquire'] not in available_sessions:
ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire'])
ret['res'] = False
return ret
query_params['acquire'] = kwargs['acquire']
if 'release' in kwargs:
if _current['res']:
if 'Session' in _current['data']:
if _current['data']['Session'] == kwargs['release']:
query_params['release'] = kwargs['release']
else:
ret['message'] = '{0} locked by another session.'.format(key)
ret['res'] = False
return ret
else:
                ret['message'] = '{0} is not a valid session.'.format(kwargs['release'])
ret['res'] = False
else:
            log.error('Key {0} does not exist. Skipping release.'.format(key))
data = value
function = 'kv/{0}'.format(key)
method = 'PUT'
ret = _query(consul_url=consul_url,
token=token,
function=function,
method=method,
data=data,
query_params=query_params)
if ret['res']:
ret['res'] = True
ret['data'] = 'Added key {0} with value {1}.'.format(key, value)
else:
ret['res'] = False
ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value)
return ret | Put values into Consul
:param consul_url: The Consul server URL.
:param key: The key to use as the starting point for the list.
:param value: The value to set the key to.
:param flags: This can be used to specify an unsigned value
between 0 and 2^64-1. Clients can choose to use
this however makes sense for their application.
:param cas: This flag is used to turn the PUT into a
Check-And-Set operation.
:param acquire: This flag is used to turn the PUT into a
lock acquisition operation.
:param release: This flag is used to turn the PUT into a
lock release operation.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.put key='web/key1' value="Hello there"
salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'
salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' | Below is the the instruction that describes the task:
### Input:
Put values into Consul
:param consul_url: The Consul server URL.
:param key: The key to use as the starting point for the list.
:param value: The value to set the key to.
:param flags: This can be used to specify an unsigned value
between 0 and 2^64-1. Clients can choose to use
this however makes sense for their application.
:param cas: This flag is used to turn the PUT into a
Check-And-Set operation.
:param acquire: This flag is used to turn the PUT into a
lock acquisition operation.
:param release: This flag is used to turn the PUT into a
lock release operation.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.put key='web/key1' value="Hello there"
salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'
salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'
### Response:
def put(consul_url=None, token=None, key=None, value=None, **kwargs):
'''
Put values into Consul
:param consul_url: The Consul server URL.
:param key: The key to use as the starting point for the list.
:param value: The value to set the key to.
:param flags: This can be used to specify an unsigned value
between 0 and 2^64-1. Clients can choose to use
this however makes sense for their application.
:param cas: This flag is used to turn the PUT into a
Check-And-Set operation.
:param acquire: This flag is used to turn the PUT into a
lock acquisition operation.
:param release: This flag is used to turn the PUT into a
lock release operation.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.put key='web/key1' value="Hello there"
salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'
salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'
'''
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
if not key:
raise SaltInvocationError('Required argument "key" is missing.')
    # It is invalid to specify these together
conflicting_args = ['cas', 'release', 'acquire']
for _l1 in conflicting_args:
for _l2 in conflicting_args:
if _l1 in kwargs and _l2 in kwargs and _l1 != _l2:
raise SaltInvocationError('Using arguments `{0}` and `{1}`'
' together is invalid.'.format(_l1, _l2))
query_params = {}
available_sessions = session_list(consul_url=consul_url, return_list=True)
_current = get(consul_url=consul_url, key=key)
if 'flags' in kwargs:
if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64:
query_params['flags'] = kwargs['flags']
if 'cas' in kwargs:
if _current['res']:
if kwargs['cas'] == 0:
ret['message'] = ('Key {0} exists, index '
'must be non-zero.'.format(key))
ret['res'] = False
return ret
if kwargs['cas'] != _current['data']['ModifyIndex']:
ret['message'] = ('Key {0} exists, but indexes '
'do not match.'.format(key))
ret['res'] = False
return ret
query_params['cas'] = kwargs['cas']
else:
            ret['message'] = ('Key {0} does not exist, '
'CAS argument can not be used.'.format(key))
ret['res'] = False
return ret
if 'acquire' in kwargs:
if kwargs['acquire'] not in available_sessions:
ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire'])
ret['res'] = False
return ret
query_params['acquire'] = kwargs['acquire']
if 'release' in kwargs:
if _current['res']:
if 'Session' in _current['data']:
if _current['data']['Session'] == kwargs['release']:
query_params['release'] = kwargs['release']
else:
ret['message'] = '{0} locked by another session.'.format(key)
ret['res'] = False
return ret
else:
                ret['message'] = '{0} is not a valid session.'.format(kwargs['release'])
ret['res'] = False
else:
            log.error('Key {0} does not exist. Skipping release.'.format(key))
data = value
function = 'kv/{0}'.format(key)
method = 'PUT'
ret = _query(consul_url=consul_url,
token=token,
function=function,
method=method,
data=data,
query_params=query_params)
if ret['res']:
ret['res'] = True
ret['data'] = 'Added key {0} with value {1}.'.format(key, value)
else:
ret['res'] = False
ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value)
return ret |
def close(self):
"""Close connection to server."""
try:
self._socket.sendall('quit\r\n')
except socket.error:
pass
try:
self._socket.close()
except socket.error:
pass | Close connection to server. | Below is the the instruction that describes the task:
### Input:
Close connection to server.
### Response:
def close(self):
"""Close connection to server."""
try:
self._socket.sendall('quit\r\n')
except socket.error:
pass
try:
self._socket.close()
except socket.error:
pass |
def attribute_map_set(self, address, attribute_maps,
route_dist=None, route_family=RF_VPN_V4):
"""This method sets attribute mapping to a neighbor.
attribute mapping can be used when you want to apply
attribute to BGPUpdate under specific conditions.
``address`` specifies the IP address of the neighbor
``attribute_maps`` specifies attribute_map list that are used
before paths are advertised. All the items in the list must
be an instance of AttributeMap class
``route_dist`` specifies route dist in which attribute_maps
are added.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
We can set AttributeMap to a neighbor as follows::
pref_filter = PrefixFilter('192.168.103.0/30',
PrefixFilter.POLICY_PERMIT)
attribute_map = AttributeMap([pref_filter],
AttributeMap.ATTR_LOCAL_PREF, 250)
speaker.attribute_map_set('192.168.50.102', [attribute_map])
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family)
func_name = 'neighbor.attribute_map.set'
param = {
neighbors.IP_ADDRESS: address,
neighbors.ATTRIBUTE_MAP: attribute_maps,
}
if route_dist is not None:
param[vrfs.ROUTE_DISTINGUISHER] = route_dist
param[vrfs.VRF_RF] = route_family
call(func_name, **param) | This method sets attribute mapping to a neighbor.
attribute mapping can be used when you want to apply
attribute to BGPUpdate under specific conditions.
``address`` specifies the IP address of the neighbor
``attribute_maps`` specifies attribute_map list that are used
before paths are advertised. All the items in the list must
be an instance of AttributeMap class
``route_dist`` specifies route dist in which attribute_maps
are added.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
We can set AttributeMap to a neighbor as follows::
pref_filter = PrefixFilter('192.168.103.0/30',
PrefixFilter.POLICY_PERMIT)
attribute_map = AttributeMap([pref_filter],
AttributeMap.ATTR_LOCAL_PREF, 250)
speaker.attribute_map_set('192.168.50.102', [attribute_map]) | Below is the the instruction that describes the task:
### Input:
This method sets attribute mapping to a neighbor.
attribute mapping can be used when you want to apply
attribute to BGPUpdate under specific conditions.
``address`` specifies the IP address of the neighbor
``attribute_maps`` specifies attribute_map list that are used
before paths are advertised. All the items in the list must
be an instance of AttributeMap class
``route_dist`` specifies route dist in which attribute_maps
are added.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
We can set AttributeMap to a neighbor as follows::
pref_filter = PrefixFilter('192.168.103.0/30',
PrefixFilter.POLICY_PERMIT)
attribute_map = AttributeMap([pref_filter],
AttributeMap.ATTR_LOCAL_PREF, 250)
speaker.attribute_map_set('192.168.50.102', [attribute_map])
### Response:
def attribute_map_set(self, address, attribute_maps,
route_dist=None, route_family=RF_VPN_V4):
"""This method sets attribute mapping to a neighbor.
attribute mapping can be used when you want to apply
attribute to BGPUpdate under specific conditions.
``address`` specifies the IP address of the neighbor
``attribute_maps`` specifies attribute_map list that are used
before paths are advertised. All the items in the list must
be an instance of AttributeMap class
``route_dist`` specifies route dist in which attribute_maps
are added.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
We can set AttributeMap to a neighbor as follows::
pref_filter = PrefixFilter('192.168.103.0/30',
PrefixFilter.POLICY_PERMIT)
attribute_map = AttributeMap([pref_filter],
AttributeMap.ATTR_LOCAL_PREF, 250)
speaker.attribute_map_set('192.168.50.102', [attribute_map])
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family)
func_name = 'neighbor.attribute_map.set'
param = {
neighbors.IP_ADDRESS: address,
neighbors.ATTRIBUTE_MAP: attribute_maps,
}
if route_dist is not None:
param[vrfs.ROUTE_DISTINGUISHER] = route_dist
param[vrfs.VRF_RF] = route_family
call(func_name, **param) |
def _orthogonalize(X):
""" Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized
"""
if X.size == X.shape[0]:
return X
from scipy.linalg import pinv, norm
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
# X[:, i] /= norm(X[:, i])
return X | Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized | Below is the the instruction that describes the task:
### Input:
Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized
### Response:
def _orthogonalize(X):
""" Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized
"""
if X.size == X.shape[0]:
return X
from scipy.linalg import pinv, norm
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
# X[:, i] /= norm(X[:, i])
return X |
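A quick numerical check of the pinv-based, in-place orthogonalisation used above (random data; the column count is arbitrary):

import numpy as np
from scipy.linalg import pinv

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
for i in range(1, X.shape[1]):
    X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
# every later column is now numerically orthogonal to the earlier ones
print(np.allclose(X[:, 1] @ X[:, 0], 0), np.allclose(X[:, 2] @ X[:, :2], 0))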
def process_vasprun(self, dir_name, taskname, filename):
"""
Process a vasprun.xml file.
"""
vasprun_file = os.path.join(dir_name, filename)
if self.parse_projected_eigen and (self.parse_projected_eigen != 'final' or \
taskname == self.runs[-1]):
parse_projected_eigen = True
else:
parse_projected_eigen = False
r = Vasprun(vasprun_file,parse_projected_eigen=parse_projected_eigen)
d = r.as_dict()
d["dir_name"] = os.path.abspath(dir_name)
d["completed_at"] = \
str(datetime.datetime.fromtimestamp(os.path.getmtime(
vasprun_file)))
d["cif"] = str(CifWriter(r.final_structure))
d["density"] = r.final_structure.density
if self.parse_dos and (self.parse_dos != 'final' \
or taskname == self.runs[-1]):
try:
d["dos"] = r.complete_dos.as_dict()
except Exception:
logger.warning("No valid dos data exist in {}.\n Skipping dos"
.format(dir_name))
if taskname == "relax1" or taskname == "relax2":
d["task"] = {"type": "aflow", "name": taskname}
else:
d["task"] = {"type": taskname, "name": taskname}
d["oxide_type"] = oxide_type(r.final_structure)
return d | Process a vasprun.xml file. | Below is the the instruction that describes the task:
### Input:
Process a vasprun.xml file.
### Response:
def process_vasprun(self, dir_name, taskname, filename):
"""
Process a vasprun.xml file.
"""
vasprun_file = os.path.join(dir_name, filename)
if self.parse_projected_eigen and (self.parse_projected_eigen != 'final' or \
taskname == self.runs[-1]):
parse_projected_eigen = True
else:
parse_projected_eigen = False
r = Vasprun(vasprun_file,parse_projected_eigen=parse_projected_eigen)
d = r.as_dict()
d["dir_name"] = os.path.abspath(dir_name)
d["completed_at"] = \
str(datetime.datetime.fromtimestamp(os.path.getmtime(
vasprun_file)))
d["cif"] = str(CifWriter(r.final_structure))
d["density"] = r.final_structure.density
if self.parse_dos and (self.parse_dos != 'final' \
or taskname == self.runs[-1]):
try:
d["dos"] = r.complete_dos.as_dict()
except Exception:
logger.warning("No valid dos data exist in {}.\n Skipping dos"
.format(dir_name))
if taskname == "relax1" or taskname == "relax2":
d["task"] = {"type": "aflow", "name": taskname}
else:
d["task"] = {"type": taskname, "name": taskname}
d["oxide_type"] = oxide_type(r.final_structure)
return d |
def update_alarm(self, alarm, criteria=None, disabled=False,
label=None, name=None, metadata=None):
"""
Updates an existing alarm on this entity.
"""
return self._alarm_manager.update(alarm, criteria=criteria,
disabled=disabled, label=label, name=name, metadata=metadata) | Updates an existing alarm on this entity. | Below is the the instruction that describes the task:
### Input:
Updates an existing alarm on this entity.
### Response:
def update_alarm(self, alarm, criteria=None, disabled=False,
label=None, name=None, metadata=None):
"""
Updates an existing alarm on this entity.
"""
return self._alarm_manager.update(alarm, criteria=criteria,
disabled=disabled, label=label, name=name, metadata=metadata) |
def reverse(array):
"""
returns a reversed numpy array
"""
l = list(array)
l.reverse()
return _n.array(l) | returns a reversed numpy array | Below is the the instruction that describes the task:
### Input:
returns a reversed numpy array
### Response:
def reverse(array):
"""
returns a reversed numpy array
"""
l = list(array)
l.reverse()
return _n.array(l) |
def processes(self):
"""Initialise and return the list of processes associated with this pool"""
if self._processes is None:
self._processes = []
for p in range(self.workers):
t = Task(self._target, self._args, self._kwargs)
t.name = "%s-%d" % (self.target_name, p)
self._processes.append(t)
return self._processes | Initialise and return the list of processes associated with this pool | Below is the the instruction that describes the task:
### Input:
Initialise and return the list of processes associated with this pool
### Response:
def processes(self):
"""Initialise and return the list of processes associated with this pool"""
if self._processes is None:
self._processes = []
for p in range(self.workers):
t = Task(self._target, self._args, self._kwargs)
t.name = "%s-%d" % (self.target_name, p)
self._processes.append(t)
return self._processes |
def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable | Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters. | Below is the the instruction that describes the task:
### Input:
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
### Response:
def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable |
async def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
await self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type) | Handle incoming packets from the server. | Below is the the instruction that describes the task:
### Input:
Handle incoming packets from the server.
### Response:
async def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
await self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type) |
def _update_text_record(self, witness, text_id):
"""Updates the record with `text_id` with `witness`\'s checksum and
token count.
        :param witness: witness to update from
:type witness: `WitnessText`
:param text_id: database ID of Text record
:type text_id: `int`
"""
checksum = witness.get_checksum()
token_count = len(witness.get_tokens())
with self._conn:
self._conn.execute(constants.UPDATE_TEXT_SQL,
[checksum, token_count, text_id]) | Updates the record with `text_id` with `witness`\'s checksum and
token count.
:param witness: witness to update from
:type witness: `WitnessText`
:param text_id: database ID of Text record
:type text_id: `int` | Below is the the instruction that describes the task:
### Input:
Updates the record with `text_id` with `witness`\'s checksum and
token count.
:param withness: witness to update from
:type witness: `WitnessText`
:param text_id: database ID of Text record
:type text_id: `int`
### Response:
def _update_text_record(self, witness, text_id):
"""Updates the record with `text_id` with `witness`\'s checksum and
token count.
:param withness: witness to update from
:type witness: `WitnessText`
:param text_id: database ID of Text record
:type text_id: `int`
"""
checksum = witness.get_checksum()
token_count = len(witness.get_tokens())
with self._conn:
self._conn.execute(constants.UPDATE_TEXT_SQL,
[checksum, token_count, text_id]) |
def find(self, search_str, by='name', language='en'):
'''Select values by attribute
Args:
            search_str(str): the string to search for
by(str): the name of the attribute to search by, defaults to 'name'
The specified attribute must be either a string
or a dict mapping language codes to strings.
Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
language(str): language code specifying the language of the text to be searched, defaults to 'en'
Returns:
DictLike: items where value.<by> contains the search_str. International strings
stored as dict with language codes as keys are
searched. Capitalization is ignored.
'''
s = search_str.lower()
# We distinguish between international strings stored as dict such as
# name.en, name.fr, and normal strings.
if by in ['name', 'description']:
get_field = lambda obj: getattr(obj, by)[language]
else: # normal string
get_field = lambda obj: getattr(obj, by)
return DictLike(result for result in self.items()
if s in get_field(result[1]).lower()) | Select values by attribute
Args:
    search_str(str): the string to search for
by(str): the name of the attribute to search by, defaults to 'name'
The specified attribute must be either a string
or a dict mapping language codes to strings.
Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
language(str): language code specifying the language of the text to be searched, defaults to 'en'
Returns:
DictLike: items where value.<by> contains the search_str. International strings
stored as dict with language codes as keys are
searched. Capitalization is ignored. | Below is the the instruction that describes the task:
### Input:
Select values by attribute
Args:
    search_str(str): the string to search for
by(str): the name of the attribute to search by, defaults to 'name'
The specified attribute must be either a string
or a dict mapping language codes to strings.
Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
language(str): language code specifying the language of the text to be searched, defaults to 'en'
Returns:
DictLike: items where value.<by> contains the search_str. International strings
stored as dict with language codes as keys are
searched. Capitalization is ignored.
### Response:
def find(self, search_str, by='name', language='en'):
'''Select values by attribute
Args:
            search_str(str): the string to search for
by(str): the name of the attribute to search by, defaults to 'name'
The specified attribute must be either a string
or a dict mapping language codes to strings.
Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
language(str): language code specifying the language of the text to be searched, defaults to 'en'
Returns:
DictLike: items where value.<by> contains the search_str. International strings
stored as dict with language codes as keys are
searched. Capitalization is ignored.
'''
s = search_str.lower()
# We distinguish between international strings stored as dict such as
# name.en, name.fr, and normal strings.
if by in ['name', 'description']:
get_field = lambda obj: getattr(obj, by)[language]
else: # normal string
get_field = lambda obj: getattr(obj, by)
return DictLike(result for result in self.items()
if s in get_field(result[1]).lower()) |
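A standalone illustration of the case-insensitive substring match implemented above, run over made-up objects whose name attribute is a language-code dict:

class Item:
    def __init__(self, name):
        self.name = name  # dict: language code -> string

items = {'EXR': Item({'en': 'Exchange Rates', 'fr': 'Taux de change'}),
         'GDP': Item({'en': 'Gross Domestic Product'})}
s = 'exchange'.lower()
hits = {k: v for k, v in items.items() if s in v.name['en'].lower()}
print(list(hits))  # ['EXR']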
def readme():
"""Try to read README.rst or return empty string if failed.
:return: File contents.
:rtype: str
"""
path = os.path.realpath(os.path.join(os.path.dirname(__file__), 'README.rst'))
handle = None
try:
handle = codecs.open(path, encoding='utf-8')
return handle.read(131072)
except IOError:
return ''
finally:
getattr(handle, 'close', lambda: None)() | Try to read README.rst or return empty string if failed.
:return: File contents.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Try to read README.rst or return empty string if failed.
:return: File contents.
:rtype: str
### Response:
def readme():
"""Try to read README.rst or return empty string if failed.
:return: File contents.
:rtype: str
"""
path = os.path.realpath(os.path.join(os.path.dirname(__file__), 'README.rst'))
handle = None
try:
handle = codecs.open(path, encoding='utf-8')
return handle.read(131072)
except IOError:
return ''
finally:
getattr(handle, 'close', lambda: None)() |
def from_unknown_text(text, strict=False):
"""
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
if text.startswith("+"):
crs = from_proj4(text, strict)
elif text.startswith(("PROJCS[","GEOGCS[")):
crs = from_unknown_wkt(text, strict)
#elif text.startswith("urn:"):
# crs = from_ogc_urn(text, strict)
elif text.startswith("EPSG:"):
crs = from_epsg_code(text.split(":")[1])
elif text.startswith("ESRI:"):
crs = from_esri_code(text.split(":")[1])
elif text.startswith("SR-ORG:"):
crs = from_sr_code(text.split(":")[1])
else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
return crs | Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object. | Below is the the instruction that describes the task:
### Input:
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
### Response:
def from_unknown_text(text, strict=False):
"""
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
if text.startswith("+"):
crs = from_proj4(text, strict)
elif text.startswith(("PROJCS[","GEOGCS[")):
crs = from_unknown_wkt(text, strict)
#elif text.startswith("urn:"):
# crs = from_ogc_urn(text, strict)
elif text.startswith("EPSG:"):
crs = from_epsg_code(text.split(":")[1])
elif text.startswith("ESRI:"):
crs = from_esri_code(text.split(":")[1])
elif text.startswith("SR-ORG:"):
crs = from_sr_code(text.split(":")[1])
else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
return crs |
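A standalone mirror of the prefix dispatch above, returning a label instead of calling the real parsers (the CRS strings are illustrative):

def detect_format(text):
    if text.startswith("+"):
        return "proj4"
    if text.startswith(("PROJCS[", "GEOGCS[")):
        return "wkt"
    for prefix, label in (("EPSG:", "epsg"), ("ESRI:", "esri"), ("SR-ORG:", "sr-org")):
        if text.startswith(prefix):
            return label
    raise ValueError("unrecognised crs format")

print(detect_format("EPSG:4326"))                   # epsg
print(detect_format("+proj=longlat +datum=WGS84"))  # proj4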
def sgn_prod(p1, p2):
r"""
Multiply two Paulis and track the phase.
$P_3 = P_1 \otimes P_2$: X*Y
Args:
p1 (Pauli): pauli 1
p2 (Pauli): pauli 2
Returns:
Pauli: the multiplied pauli
complex: the sign of the multiplication, 1, -1, 1j or -1j
"""
phase = Pauli._prod_phase(p1, p2)
new_pauli = p1 * p2
return new_pauli, phase | r"""
Multiply two Paulis and track the phase.
$P_3 = P_1 \otimes P_2$: X*Y
Args:
p1 (Pauli): pauli 1
p2 (Pauli): pauli 2
Returns:
Pauli: the multiplied pauli
complex: the sign of the multiplication, 1, -1, 1j or -1j | Below is the the instruction that describes the task:
### Input:
r"""
Multiply two Paulis and track the phase.
$P_3 = P_1 \otimes P_2$: X*Y
Args:
p1 (Pauli): pauli 1
p2 (Pauli): pauli 2
Returns:
Pauli: the multiplied pauli
complex: the sign of the multiplication, 1, -1, 1j or -1j
### Response:
def sgn_prod(p1, p2):
r"""
Multiply two Paulis and track the phase.
$P_3 = P_1 \otimes P_2$: X*Y
Args:
p1 (Pauli): pauli 1
p2 (Pauli): pauli 2
Returns:
Pauli: the multiplied pauli
complex: the sign of the multiplication, 1, -1, 1j or -1j
"""
phase = Pauli._prod_phase(p1, p2)
new_pauli = p1 * p2
return new_pauli, phase |
def _on_decisions_event(self, event=None, **kwargs):
"""Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel.
"""
if not self.ran_ready_function:
logger.warning('ignoring decision from {} before when_all_players_ready: {}'.format(event.participant.code, event.value))
return
with track('_on_decisions_event'):
self.group_decisions[event.participant.code] = event.value
self._group_decisions_updated = True
self.save(update_fields=['group_decisions', '_group_decisions_updated'])
if not self.num_subperiods() and not self.rate_limit():
self.send('group_decisions', self.group_decisions) | Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel. | Below is the the instruction that describes the task:
### Input:
Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel.
### Response:
def _on_decisions_event(self, event=None, **kwargs):
"""Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel.
"""
if not self.ran_ready_function:
logger.warning('ignoring decision from {} before when_all_players_ready: {}'.format(event.participant.code, event.value))
return
with track('_on_decisions_event'):
self.group_decisions[event.participant.code] = event.value
self._group_decisions_updated = True
self.save(update_fields=['group_decisions', '_group_decisions_updated'])
if not self.num_subperiods() and not self.rate_limit():
self.send('group_decisions', self.group_decisions) |
def is_config_container(v):
"""
checks whether v is of type list,dict or Config
"""
cls = type(v)
return (
issubclass(cls, list) or
issubclass(cls, dict) or
issubclass(cls, Config)
) | checks whether v is of type list,dict or Config | Below is the the instruction that describes the task:
### Input:
checks whether v is of type list,dict or Config
### Response:
def is_config_container(v):
"""
checks whether v is of type list,dict or Config
"""
cls = type(v)
return (
issubclass(cls, list) or
issubclass(cls, dict) or
issubclass(cls, Config)
) |
def _maxiter_default(self):
""" Trait initialiser.
"""
mode = self.mode
if mode == "KK":
return 100 * len(self.nodes)
elif mode == "major":
return 200
else:
return 600 | Trait initialiser. | Below is the the instruction that describes the task:
### Input:
Trait initialiser.
### Response:
def _maxiter_default(self):
""" Trait initialiser.
"""
mode = self.mode
if mode == "KK":
return 100 * len(self.nodes)
elif mode == "major":
return 200
else:
return 600 |
def ts_stream_keys(self, table, timeout=None):
"""
Streams keys from a timeseries table, returning an iterator that
yields lists of keys.
"""
msg_code = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ
codec = self._get_codec(msg_code)
msg = codec.encode_timeseries_listkeysreq(table, timeout)
self._send_msg(msg.msg_code, msg.data)
return PbufTsKeyStream(self, codec, self._ts_convert_timestamp) | Streams keys from a timeseries table, returning an iterator that
yields lists of keys. | Below is the the instruction that describes the task:
### Input:
Streams keys from a timeseries table, returning an iterator that
yields lists of keys.
### Response:
def ts_stream_keys(self, table, timeout=None):
"""
Streams keys from a timeseries table, returning an iterator that
yields lists of keys.
"""
msg_code = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ
codec = self._get_codec(msg_code)
msg = codec.encode_timeseries_listkeysreq(table, timeout)
self._send_msg(msg.msg_code, msg.data)
return PbufTsKeyStream(self, codec, self._ts_convert_timestamp) |
def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):
"""Extracts identifier mappings from the SruDbIdMapTable table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
esedb_table (pyesedb.table): table.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation.
"""
identifier_mappings = {}
for esedb_record in esedb_table.records:
if parser_mediator.abort:
break
identifier, mapped_value = self._ParseIdentifierMappingRecord(
parser_mediator, esedb_table.name, esedb_record)
if identifier is None or mapped_value is None:
continue
if identifier in identifier_mappings:
parser_mediator.ProduceExtractionWarning(
'identifier: {0:d} already exists in mappings.'.format(identifier))
continue
identifier_mappings[identifier] = mapped_value
return identifier_mappings | Extracts identifier mappings from the SruDbIdMapTable table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
esedb_table (pyesedb.table): table.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation. | Below is the the instruction that describes the task:
### Input:
Extracts identifier mappings from the SruDbIdMapTable table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
esedb_table (pyesedb.table): table.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation.
### Response:
def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):
"""Extracts identifier mappings from the SruDbIdMapTable table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
esedb_table (pyesedb.table): table.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation.
"""
identifier_mappings = {}
for esedb_record in esedb_table.records:
if parser_mediator.abort:
break
identifier, mapped_value = self._ParseIdentifierMappingRecord(
parser_mediator, esedb_table.name, esedb_record)
if identifier is None or mapped_value is None:
continue
if identifier in identifier_mappings:
parser_mediator.ProduceExtractionWarning(
'identifier: {0:d} already exists in mappings.'.format(identifier))
continue
identifier_mappings[identifier] = mapped_value
return identifier_mappings |
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message) | Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user | Below is the the instruction that describes the task:
### Input:
Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
### Response:
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message) |
def _api_get(self, url, **kwargs):
"""
Convenience method for getting
"""
response = self.session.get(
url=url,
headers=self._get_api_headers(),
**kwargs
)
if not response.ok:
raise ServerException(
'{0}: {1}'.format(
response.status_code,
response.text or response.reason
))
return response.json() | Convenience method for getting | Below is the the instruction that describes the task:
### Input:
Convenience method for getting
### Response:
def _api_get(self, url, **kwargs):
"""
Convenience method for getting
"""
response = self.session.get(
url=url,
headers=self._get_api_headers(),
**kwargs
)
if not response.ok:
raise ServerException(
'{0}: {1}'.format(
response.status_code,
response.text or response.reason
))
return response.json() |
def generate(self, nb_steps=100, averaging=50, rescale=True):
"""Generate data from an FCM containing cycles."""
if self.cfunctions is None:
self.init_variables()
new_df = pd.DataFrame()
causes = [[c for c in np.nonzero(self.adjacency_matrix[:, j])[0]]
for j in range(self.nodes)]
values = [[] for i in range(self.nodes)]
for i in range(nb_steps):
for j in range(self.nodes):
new_df["V" + str(j)] = self.cfunctions[j](self.data.iloc[:, causes[j]].values)[:, 0]
if rescale:
new_df["V" + str(j)] = scale(new_df["V" + str(j)])
if i > nb_steps-averaging:
values[j].append(new_df["V" + str(j)])
self.data = new_df
self.data = pd.DataFrame(np.array([np.mean(values[i], axis=0)
for i in range(self.nodes)]).transpose(),
columns=["V{}".format(j) for j in range(self.nodes)])
return self.g, self.data | Generate data from an FCM containing cycles. | Below is the the instruction that describes the task:
### Input:
Generate data from an FCM containing cycles.
### Response:
def generate(self, nb_steps=100, averaging=50, rescale=True):
"""Generate data from an FCM containing cycles."""
if self.cfunctions is None:
self.init_variables()
new_df = pd.DataFrame()
causes = [[c for c in np.nonzero(self.adjacency_matrix[:, j])[0]]
for j in range(self.nodes)]
values = [[] for i in range(self.nodes)]
for i in range(nb_steps):
for j in range(self.nodes):
new_df["V" + str(j)] = self.cfunctions[j](self.data.iloc[:, causes[j]].values)[:, 0]
if rescale:
new_df["V" + str(j)] = scale(new_df["V" + str(j)])
if i > nb_steps-averaging:
values[j].append(new_df["V" + str(j)])
self.data = new_df
self.data = pd.DataFrame(np.array([np.mean(values[i], axis=0)
for i in range(self.nodes)]).transpose(),
columns=["V{}".format(j) for j in range(self.nodes)])
return self.g, self.data |
def deleteEdge(self, edge, waitForSync = False) :
"""removes an edge from the graph"""
url = "%s/edge/%s" % (self.URL, edge._id)
r = self.connection.session.delete(url, params = {'waitForSync' : waitForSync})
if r.status_code == 200 or r.status_code == 202 :
return True
raise DeletionError("Unable to delete edge, %s" % edge._id, r.json()) | removes an edge from the graph | Below is the the instruction that describes the task:
### Input:
removes an edge from the graph
### Response:
def deleteEdge(self, edge, waitForSync = False) :
"""removes an edge from the graph"""
url = "%s/edge/%s" % (self.URL, edge._id)
r = self.connection.session.delete(url, params = {'waitForSync' : waitForSync})
if r.status_code == 200 or r.status_code == 202 :
return True
raise DeletionError("Unable to delete edge, %s" % edge._id, r.json()) |
def lrange(self, key, start, stop):
"""Emulate lrange."""
redis_list = self._get_list(key, 'LRANGE')
start, stop = self._translate_range(len(redis_list), start, stop)
return redis_list[start:stop + 1] | Emulate lrange. | Below is the the instruction that describes the task:
### Input:
Emulate lrange.
### Response:
def lrange(self, key, start, stop):
"""Emulate lrange."""
redis_list = self._get_list(key, 'LRANGE')
start, stop = self._translate_range(len(redis_list), start, stop)
return redis_list[start:stop + 1] |
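The detail worth calling out above is that Redis LRANGE is inclusive of the stop index, unlike Python slicing; a plain list stands in for the stored value in this sketch:

redis_list = ['a', 'b', 'c', 'd']
start, stop = 0, 2                   # after translation of any negative indices
print(redis_list[start:stop + 1])    # ['a', 'b', 'c'] -- matches LRANGE key 0 2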
def close(self):
"""
Call :func:`os.close` on :attr:`fd` if it is not :data:`None`,
then set it to :data:`None`.
"""
if not self.closed:
_vv and IOLOG.debug('%r.close()', self)
self.closed = True
os.close(self.fd) | Call :func:`os.close` on :attr:`fd` if it is not :data:`None`,
then set it to :data:`None`. | Below is the the instruction that describes the task:
### Input:
Call :func:`os.close` on :attr:`fd` if it is not :data:`None`,
then set it to :data:`None`.
### Response:
def close(self):
"""
Call :func:`os.close` on :attr:`fd` if it is not :data:`None`,
then set it to :data:`None`.
"""
if not self.closed:
_vv and IOLOG.debug('%r.close()', self)
self.closed = True
os.close(self.fd) |
def break_bond(self, ind1, ind2, tol=0.2):
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
sites = self._sites
clusters = [[sites[ind1]], [sites[ind2]]]
sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return (self.__class__.from_sites(cluster)
for cluster in clusters) | Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond. | Below is the the instruction that describes the task:
### Input:
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
### Response:
def break_bond(self, ind1, ind2, tol=0.2):
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
sites = self._sites
clusters = [[sites[ind1]], [sites[ind2]]]
sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return (self.__class__.from_sites(cluster)
for cluster in clusters) |
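The break_bond record above comes from pymatgen's molecule code. A short hedged usage sketch follows, assuming pymatgen is installed; the import path is an assumption and may differ between pymatgen versions. Splitting one O-H bond of water should leave an OH fragment and a lone H.

from pymatgen.core import Molecule  # assumed import path for recent pymatgen

water = Molecule(["O", "H", "H"],
                 [[0.000, 0.000, 0.000],
                  [0.757, 0.586, 0.000],
                  [-0.757, 0.586, 0.000]])
fragment_oh, fragment_h = water.break_bond(0, 1)
print(len(fragment_oh), len(fragment_h))  # expected: 2 1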
def _prepare_wsdl_objects(self):
"""
Preps the WSDL data structures for the user.
"""
self.DeletionControlType = self.client.factory.create('DeletionControlType')
self.TrackingId = self.client.factory.create('TrackingId')
self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType') | Preps the WSDL data structures for the user. | Below is the the instruction that describes the task:
### Input:
Preps the WSDL data structures for the user.
### Response:
def _prepare_wsdl_objects(self):
"""
Preps the WSDL data structures for the user.
"""
self.DeletionControlType = self.client.factory.create('DeletionControlType')
self.TrackingId = self.client.factory.create('TrackingId')
self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType') |
def filter_service_by_hostgroup_name(group):
"""Filter for service
Filter on hostgroup
:param group: hostgroup to filter
:type group: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for service. Accept if hostgroup in service.host.hostgroups"""
service = items["service"]
host = items["hosts"][service.host]
if service is None or host is None:
return False
return group in [items["hostgroups"][g].hostgroup_name for g in host.hostgroups]
return inner_filter | Filter for service
Filter on hostgroup
:param group: hostgroup to filter
:type group: str
:return: Filter
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Filter for service
Filter on hostgroup
:param group: hostgroup to filter
:type group: str
:return: Filter
:rtype: bool
### Response:
def filter_service_by_hostgroup_name(group):
"""Filter for service
Filter on hostgroup
:param group: hostgroup to filter
:type group: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for service. Accept if hostgroup in service.host.hostgroups"""
service = items["service"]
host = items["hosts"][service.host]
if service is None or host is None:
return False
return group in [items["hostgroups"][g].hostgroup_name for g in host.hostgroups]
return inner_filter |
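A hedged usage sketch for the closure-based filter above. The namedtuples are hypothetical stand-ins for the monitoring objects (service, host, hostgroup) that the real framework would pass in, and the sketch assumes the filter_service_by_hostgroup_name definition above is in scope.

from collections import namedtuple

Service = namedtuple("Service", "host")
Host = namedtuple("Host", "hostgroups")
Group = namedtuple("Group", "hostgroup_name")

items = {
    "service": Service(host="h1"),
    "hosts": {"h1": Host(hostgroups=["g1"])},
    "hostgroups": {"g1": Group(hostgroup_name="web")},
}
print(filter_service_by_hostgroup_name("web")(items))  # True
print(filter_service_by_hostgroup_name("db")(items))   # False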
def lat_from_inc(inc, a95=None):
"""
Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat
"""
rad = old_div(np.pi, 180.)
paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad)
if a95 is not None:
paleo_lat_max = old_div(
np.arctan(0.5 * np.tan((inc + a95) * rad)), rad)
paleo_lat_min = old_div(
np.arctan(0.5 * np.tan((inc - a95) * rad)), rad)
return paleo_lat, paleo_lat_max, paleo_lat_min
else:
return paleo_lat | Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat | Below is the the instruction that describes the task:
### Input:
Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat
### Response:
def lat_from_inc(inc, a95=None):
"""
Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat
"""
rad = old_div(np.pi, 180.)
paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad)
if a95 is not None:
paleo_lat_max = old_div(
np.arctan(0.5 * np.tan((inc + a95) * rad)), rad)
paleo_lat_min = old_div(
np.arctan(0.5 * np.tan((inc - a95) * rad)), rad)
return paleo_lat, paleo_lat_max, paleo_lat_min
else:
return paleo_lat |
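A quick numerical check of the dipole equation tan(I) = 2 tan(latitude) used in the record above, needing only numpy: an inclination of 45 degrees corresponds to a paleolatitude of roughly 26.6 degrees.

import numpy as np

inc = 45.0  # magnetic inclination in degrees
lat = np.degrees(np.arctan(0.5 * np.tan(np.radians(inc))))
print(round(lat, 2))  # 26.57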
def close_all_pages(self):
"""Closes all tabs of the states editor"""
states_to_be_closed = []
for state_identifier in self.tabs:
states_to_be_closed.append(state_identifier)
for state_identifier in states_to_be_closed:
self.close_page(state_identifier, delete=False) | Closes all tabs of the states editor | Below is the the instruction that describes the task:
### Input:
Closes all tabs of the states editor
### Response:
def close_all_pages(self):
"""Closes all tabs of the states editor"""
states_to_be_closed = []
for state_identifier in self.tabs:
states_to_be_closed.append(state_identifier)
for state_identifier in states_to_be_closed:
self.close_page(state_identifier, delete=False) |
def is_cached(self, version=None):
'''
Set the cache property to start/stop file caching for this archive
'''
version = _process_version(self, version)
if self.api.cache and self.api.cache.fs.isfile(
self.get_version_path(version)):
return True
return False | Set the cache property to start/stop file caching for this archive | Below is the the instruction that describes the task:
### Input:
Set the cache property to start/stop file caching for this archive
### Response:
def is_cached(self, version=None):
'''
Set the cache property to start/stop file caching for this archive
'''
version = _process_version(self, version)
if self.api.cache and self.api.cache.fs.isfile(
self.get_version_path(version)):
return True
return False |
def from_rdd_of_dataframes(self, rdd, column_idxs=None):
"""Take an RDD of Panda's DataFrames and return a Dataframe.
If the columns and indexes are already known (e.g. applyMap)
then supplying them with columnsIndexes will skip evaluating
the first partition to determine index info."""
def frame_to_spark_sql(frame):
"""Convert a Panda's DataFrame into Spark SQL Rows"""
return [r.tolist() for r in frame.to_records()]
def frame_to_schema_and_idx_names(frames):
"""Returns the schema and index names of the frames. Useful
if the frame is large and we wish to avoid transferring
the entire frame. Only bothers to apply once per partition"""
try:
frame = frames.next()
return [(list(frame.columns), list(frame.index.names))]
except StopIteration:
return []
# Store if the RDD was persisted so we don't uncache an
# explicitly cached input.
was_persisted = rdd.is_cached
# If we haven't been supplied with the schema info cache the RDD
# since we are going to evaluate the first partition and then evaluate
# the entire RDD as part of creating a Spark DataFrame.
(schema, index_names) = ([], [])
if not column_idxs:
rdd.cache()
(schema, index_names) = rdd.mapPartitions(
frame_to_schema_and_idx_names).first()
else:
(schema, index_names) = column_idxs
# Add the index_names to the schema.
index_names = _normalize_index_names(index_names)
schema = index_names + schema
ddf = DataFrame.from_schema_rdd(
self.sql_ctx.createDataFrame(rdd.flatMap(frame_to_spark_sql),
schema=schema))
ddf._index_names = index_names
if not was_persisted:
rdd.unpersist()
return ddf | Take an RDD of Panda's DataFrames and return a Dataframe.
If the columns and indexes are already known (e.g. applyMap)
then supplying them with columnsIndexes will skip evaluating
the first partition to determine index info. | Below is the the instruction that describes the task:
### Input:
Take an RDD of Panda's DataFrames and return a Dataframe.
If the columns and indexes are already known (e.g. applyMap)
then supplying them with columnsIndexes will skip evaluating
the first partition to determine index info.
### Response:
def from_rdd_of_dataframes(self, rdd, column_idxs=None):
"""Take an RDD of Panda's DataFrames and return a Dataframe.
If the columns and indexes are already known (e.g. applyMap)
then supplying them with columnsIndexes will skip evaluating
the first partition to determine index info."""
def frame_to_spark_sql(frame):
"""Convert a Panda's DataFrame into Spark SQL Rows"""
return [r.tolist() for r in frame.to_records()]
def frame_to_schema_and_idx_names(frames):
"""Returns the schema and index names of the frames. Useful
if the frame is large and we wish to avoid transferring
the entire frame. Only bothers to apply once per partition"""
try:
frame = frames.next()
return [(list(frame.columns), list(frame.index.names))]
except StopIteration:
return []
# Store if the RDD was persisted so we don't uncache an
# explicitly cached input.
was_persisted = rdd.is_cached
# If we haven't been supplied with the schema info cache the RDD
# since we are going to evaluate the first partition and then evaluate
# the entire RDD as part of creating a Spark DataFrame.
(schema, index_names) = ([], [])
if not column_idxs:
rdd.cache()
(schema, index_names) = rdd.mapPartitions(
frame_to_schema_and_idx_names).first()
else:
(schema, index_names) = column_idxs
# Add the index_names to the schema.
index_names = _normalize_index_names(index_names)
schema = index_names + schema
ddf = DataFrame.from_schema_rdd(
self.sql_ctx.createDataFrame(rdd.flatMap(frame_to_spark_sql),
schema=schema))
ddf._index_names = index_names
if not was_persisted:
rdd.unpersist()
return ddf |
def _parse_src(cls, src_contents, src_filename):
"""
Return a stream of `(token_type, value)` tuples
parsed from `src_contents` (str)
Uses `src_filename` to guess the type of file
so it can highlight syntax correctly.
"""
# Parse the source into tokens
try:
lexer = guess_lexer_for_filename(src_filename, src_contents)
except ClassNotFound:
lexer = TextLexer()
# Ensure that we don't strip newlines from
# the source file when lexing.
lexer.stripnl = False
return pygments.lex(src_contents, lexer) | Return a stream of `(token_type, value)` tuples
parsed from `src_contents` (str)
Uses `src_filename` to guess the type of file
so it can highlight syntax correctly. | Below is the the instruction that describes the task:
### Input:
Return a stream of `(token_type, value)` tuples
parsed from `src_contents` (str)
Uses `src_filename` to guess the type of file
so it can highlight syntax correctly.
### Response:
def _parse_src(cls, src_contents, src_filename):
"""
Return a stream of `(token_type, value)` tuples
parsed from `src_contents` (str)
Uses `src_filename` to guess the type of file
so it can highlight syntax correctly.
"""
# Parse the source into tokens
try:
lexer = guess_lexer_for_filename(src_filename, src_contents)
except ClassNotFound:
lexer = TextLexer()
# Ensure that we don't strip newlines from
# the source file when lexing.
lexer.stripnl = False
return pygments.lex(src_contents, lexer) |
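The record above is a thin wrapper over pygments. The standalone sketch below shows the same lexer-guessing idea with the public pygments API; the exact token types printed depend on the installed pygments version.

import pygments
from pygments.lexers import TextLexer, guess_lexer_for_filename
from pygments.util import ClassNotFound

src = "print('hello')\n"
try:
    lexer = guess_lexer_for_filename("example.py", src)
except ClassNotFound:
    lexer = TextLexer()
lexer.stripnl = False  # keep trailing newlines in the token stream
for token_type, value in pygments.lex(src, lexer):
    print(token_type, repr(value))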
def on_trial_remove(self, trial_runner, trial):
"""Marks trial as completed if it is paused and has previously ran."""
if trial.status is Trial.PAUSED and trial in self._results:
self._completed_trials.add(trial) | Marks trial as completed if it is paused and has previously ran. | Below is the the instruction that describes the task:
### Input:
Marks trial as completed if it is paused and has previously ran.
### Response:
def on_trial_remove(self, trial_runner, trial):
"""Marks trial as completed if it is paused and has previously ran."""
if trial.status is Trial.PAUSED and trial in self._results:
self._completed_trials.add(trial) |
def make_outpoint(tx_id_le, index, tree=None):
'''
byte-like, int, int -> Outpoint
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredOutpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4),
tree=utils.i2le_padded(tree, 1))
return tx.Outpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4)) | byte-like, int, int -> Outpoint | Below is the the instruction that describes the task:
### Input:
byte-like, int, int -> Outpoint
### Response:
def make_outpoint(tx_id_le, index, tree=None):
'''
byte-like, int, int -> Outpoint
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredOutpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4),
tree=utils.i2le_padded(tree, 1))
return tx.Outpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4)) |
def getFeatureID(self, location):
"""
Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid.
"""
if not self.contains(location):
return self.EMPTY_FEATURE
return self.SPHERICAL_SURFACE | Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid. | Below is the the instruction that describes the task:
### Input:
Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid.
### Response:
def getFeatureID(self, location):
"""
Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid.
"""
if not self.contains(location):
return self.EMPTY_FEATURE
return self.SPHERICAL_SURFACE |
def postComponents(self, name, status, **kwargs):
'''Create a new component.
:param name: Name of the component
:param status: Status of the component; 1-4
:param description: (optional) Description of the component
:param link: (optional) A hyperlink to the component
:param order: (optional) Order of the component
:param group_id: (optional) The group id that the component is within
:param enabled: (optional)
:return: :class:`Response <Response>` object
:rtype: requests.Response
'''
kwargs['name'] = name
kwargs['status'] = status
return self.__postRequest('/components', kwargs) | Create a new component.
:param name: Name of the component
:param status: Status of the component; 1-4
:param description: (optional) Description of the component
:param link: (optional) A hyperlink to the component
:param order: (optional) Order of the component
:param group_id: (optional) The group id that the component is within
:param enabled: (optional)
:return: :class:`Response <Response>` object
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Create a new component.
:param name: Name of the component
:param status: Status of the component; 1-4
:param description: (optional) Description of the component
:param link: (optional) A hyperlink to the component
:param order: (optional) Order of the component
:param group_id: (optional) The group id that the component is within
:param enabled: (optional)
:return: :class:`Response <Response>` object
:rtype: requests.Response
### Response:
def postComponents(self, name, status, **kwargs):
'''Create a new component.
:param name: Name of the component
:param status: Status of the component; 1-4
:param description: (optional) Description of the component
:param link: (optional) A hyperlink to the component
:param order: (optional) Order of the component
:param group_id: (optional) The group id that the component is within
:param enabled: (optional)
:return: :class:`Response <Response>` object
:rtype: requests.Response
'''
kwargs['name'] = name
kwargs['status'] = status
return self.__postRequest('/components', kwargs) |
def _package_to_staging(staging_package_url):
"""Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
"""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file
package_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../'))
setup_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'master_setup.py'))
tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
print('Building package and uploading to %s' % tar_gz_path)
ml.package_and_copy(package_root, setup_path, tar_gz_path)
return tar_gz_path | Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path. | Below is the the instruction that describes the task:
### Input:
Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
### Response:
def _package_to_staging(staging_package_url):
"""Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
"""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file
package_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../'))
setup_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'master_setup.py'))
tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
print('Building package and uploading to %s' % tar_gz_path)
ml.package_and_copy(package_root, setup_path, tar_gz_path)
return tar_gz_path |
def with_legacy_dict(self, legacy_dict_object):
"""Configure a source that consumes the dict that where used on Lexicon 2.x"""
warnings.warn(DeprecationWarning('Legacy configuration object has been used '
'to load the ConfigResolver.'))
return self.with_config_source(LegacyDictConfigSource(legacy_dict_object)) | Configure a source that consumes the dict that was used on Lexicon 2.x | Below is the the instruction that describes the task:
### Input:
Configure a source that consumes the dict that was used on Lexicon 2.x
### Response:
def with_legacy_dict(self, legacy_dict_object):
"""Configure a source that consumes the dict that where used on Lexicon 2.x"""
warnings.warn(DeprecationWarning('Legacy configuration object has been used '
'to load the ConfigResolver.'))
return self.with_config_source(LegacyDictConfigSource(legacy_dict_object)) |
def _get_jid_snapshots(jid, config='root'):
'''
Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed.
'''
jid_snapshots = [x for x in list_snapshots(config) if x['userdata'].get("salt_jid") == jid]
pre_snapshot = [x for x in jid_snapshots if x['type'] == "pre"]
post_snapshot = [x for x in jid_snapshots if x['type'] == "post"]
if not pre_snapshot or not post_snapshot:
raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
return (
pre_snapshot[0]['id'],
post_snapshot[0]['id']
) | Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed. | Below is the the instruction that describes the task:
### Input:
Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed.
### Response:
def _get_jid_snapshots(jid, config='root'):
'''
Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed.
'''
jid_snapshots = [x for x in list_snapshots(config) if x['userdata'].get("salt_jid") == jid]
pre_snapshot = [x for x in jid_snapshots if x['type'] == "pre"]
post_snapshot = [x for x in jid_snapshots if x['type'] == "post"]
if not pre_snapshot or not post_snapshot:
raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
return (
pre_snapshot[0]['id'],
post_snapshot[0]['id']
) |
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps) | True if we can execute the callback. | Below is the the instruction that describes the task:
### Input:
True if we can execute the callback.
### Response:
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps) |
def get_operation_mtf_dimension_names(self, operation_name):
"""The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
"""
mtf_dimension_names = set()
for tensor_name in self.get_operation_input_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
for tensor_name in self.get_operation_output_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
return mtf_dimension_names | The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions. | Below is the the instruction that describes the task:
### Input:
The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
### Response:
def get_operation_mtf_dimension_names(self, operation_name):
"""The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
"""
mtf_dimension_names = set()
for tensor_name in self.get_operation_input_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
for tensor_name in self.get_operation_output_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
return mtf_dimension_names |
def powerupsFor(self, interface):
"""
Returns powerups installed using C{powerUp}, in order of descending
priority.
Powerups found to have been deleted, either during the course of this
powerupsFor iteration, during an upgrader, or previously, will not be
returned.
"""
inMemoryPowerup = self._inMemoryPowerups.get(interface, None)
if inMemoryPowerup is not None:
yield inMemoryPowerup
if self.store is None:
return
name = unicode(qual(interface), 'ascii')
for cable in self.store.query(
_PowerupConnector,
AND(_PowerupConnector.interface == name,
_PowerupConnector.item == self),
sort=_PowerupConnector.priority.descending):
pup = cable.powerup
if pup is None:
# this powerup was probably deleted during an upgrader.
cable.deleteFromStore()
else:
indirector = IPowerupIndirector(pup, None)
if indirector is not None:
yield indirector.indirect(interface)
else:
yield pup | Returns powerups installed using C{powerUp}, in order of descending
priority.
Powerups found to have been deleted, either during the course of this
powerupsFor iteration, during an upgrader, or previously, will not be
returned. | Below is the the instruction that describes the task:
### Input:
Returns powerups installed using C{powerUp}, in order of descending
priority.
Powerups found to have been deleted, either during the course of this
powerupsFor iteration, during an upgrader, or previously, will not be
returned.
### Response:
def powerupsFor(self, interface):
"""
Returns powerups installed using C{powerUp}, in order of descending
priority.
Powerups found to have been deleted, either during the course of this
powerupsFor iteration, during an upgrader, or previously, will not be
returned.
"""
inMemoryPowerup = self._inMemoryPowerups.get(interface, None)
if inMemoryPowerup is not None:
yield inMemoryPowerup
if self.store is None:
return
name = unicode(qual(interface), 'ascii')
for cable in self.store.query(
_PowerupConnector,
AND(_PowerupConnector.interface == name,
_PowerupConnector.item == self),
sort=_PowerupConnector.priority.descending):
pup = cable.powerup
if pup is None:
# this powerup was probably deleted during an upgrader.
cable.deleteFromStore()
else:
indirector = IPowerupIndirector(pup, None)
if indirector is not None:
yield indirector.indirect(interface)
else:
yield pup |
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | Below is the the instruction that describes the task:
### Input:
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
### Response:
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) |
def bcftoolsMpileup(outFile, referenceFile, alignmentFile, executor):
"""
Use bcftools mpileup to generate VCF.
@param outFile: The C{str} name to write the output to.
@param referenceFile: The C{str} name of the FASTA file with the reference
sequence.
@param alignmentFile: The C{str} name of the SAM or BAM alignment file.
@param executor: An C{Executor} instance.
"""
executor.execute(
'bcftools mpileup -Ov -f %s %s > %s' %
(referenceFile, alignmentFile, outFile)) | Use bcftools mpileup to generate VCF.
@param outFile: The C{str} name to write the output to.
@param referenceFile: The C{str} name of the FASTA file with the reference
sequence.
@param alignmentFile: The C{str} name of the SAM or BAM alignment file.
@param executor: An C{Executor} instance. | Below is the the instruction that describes the task:
### Input:
Use bcftools mpileup to generate VCF.
@param outFile: The C{str} name to write the output to.
@param referenceFile: The C{str} name of the FASTA file with the reference
sequence.
@param alignmentFile: The C{str} name of the SAM or BAM alignment file.
@param executor: An C{Executor} instance.
### Response:
def bcftoolsMpileup(outFile, referenceFile, alignmentFile, executor):
"""
Use bcftools mpileup to generate VCF.
@param outFile: The C{str} name to write the output to.
@param referenceFile: The C{str} name of the FASTA file with the reference
sequence.
@param alignmentFile: The C{str} name of the SAM or BAM alignment file.
@param executor: An C{Executor} instance.
"""
executor.execute(
'bcftools mpileup -Ov -f %s %s > %s' %
(referenceFile, alignmentFile, outFile)) |
def metadata_ports_to_k8s_ports(ports):
"""
:param ports: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
:return: list of V1ServicePort
"""
exposed_ports = []
for port in ports:
splits = port.split("/", 1)
port = int(splits[0])
protocol = splits[1].upper() if len(splits) > 1 else None
exposed_ports.append(client.V1ServicePort(port=port, protocol=protocol))
return exposed_ports | :param ports: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
:return: list of V1ServicePort | Below is the the instruction that describes the task:
### Input:
:param ports: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
:return: list of V1ServicePort
### Response:
def metadata_ports_to_k8s_ports(ports):
"""
:param ports: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
:return: list of V1ServicePort
"""
exposed_ports = []
for port in ports:
splits = port.split("/", 1)
port = int(splits[0])
protocol = splits[1].upper() if len(splits) > 1 else None
exposed_ports.append(client.V1ServicePort(port=port, protocol=protocol))
return exposed_ports |
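The only non-trivial part of the record above is the port-string parsing. The sketch below exercises that parsing without the kubernetes client dependency; split_port is a hypothetical helper written for illustration, not part of the original module.

def split_port(port):
    parts = port.split("/", 1)
    return int(parts[0]), (parts[1].upper() if len(parts) > 1 else None)

print([split_port(p) for p in ["1234/tcp", "8080/udp", "9000"]])
# [(1234, 'TCP'), (8080, 'UDP'), (9000, None)]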
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if self._apps.has_key(GUI_WX):
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook() | Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL. | Below is the the instruction that describes the task:
### Input:
Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
### Response:
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if self._apps.has_key(GUI_WX):
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook() |
def _getarray(loci, tree):
"""
parse the loci file list and return presence/absence matrix
ordered by the tips on the tree
"""
## order tips
tree.ladderize()
## get tip names
snames = tree.get_leaf_names()
## make an empty matrix
lxs = np.zeros((len(snames), len(loci)), dtype=np.int)
## fill the matrix
for loc in xrange(len(loci)):
for seq in loci[loc].split("\n")[:-1]:
lxs[snames.index(seq.split()[0]), loc] += 1
return lxs, snames | parse the loci file list and return presence/absence matrix
ordered by the tips on the tree | Below is the the instruction that describes the task:
### Input:
parse the loci file list and return presence/absence matrix
ordered by the tips on the tree
### Response:
def _getarray(loci, tree):
"""
parse the loci file list and return presence/absence matrix
ordered by the tips on the tree
"""
## order tips
tree.ladderize()
## get tip names
snames = tree.get_leaf_names()
## make an empty matrix
lxs = np.zeros((len(snames), len(loci)), dtype=np.int)
## fill the matrix
for loc in xrange(len(loci)):
for seq in loci[loc].split("\n")[:-1]:
lxs[snames.index(seq.split()[0]), loc] += 1
return lxs, snames |
def get_trips(self, authentication_info, start, end):
"""Get trips for this device between start and end."""
import requests
if (authentication_info is None or
not authentication_info.is_valid()):
return []
data_url = "https://api.ritassist.nl/api/trips/GetTrips"
query = f"?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True"
header = authentication_info.create_header()
response = requests.get(data_url + query, headers=header)
trips = response.json()
result = []
for trip_json in trips:
trip = Trip(trip_json)
result.append(trip)
return result | Get trips for this device between start and end. | Below is the the instruction that describes the task:
### Input:
Get trips for this device between start and end.
### Response:
def get_trips(self, authentication_info, start, end):
"""Get trips for this device between start and end."""
import requests
if (authentication_info is None or
not authentication_info.is_valid()):
return []
data_url = "https://api.ritassist.nl/api/trips/GetTrips"
query = f"?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True"
header = authentication_info.create_header()
response = requests.get(data_url + query, headers=header)
trips = response.json()
result = []
for trip_json in trips:
trip = Trip(trip_json)
result.append(trip)
return result |
def update_viewer_state(rec, context):
"""
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
"""
if '_protocol' not in rec:
rec.pop('properties')
rec['state'] = {}
rec['state']['values'] = rec.pop('options')
layer_states = []
for layer in rec['layers']:
state_id = str(uuid.uuid4())
state_cls = STATE_CLASS[layer['_type'].split('.')[-1]]
state = state_cls(layer=context.object(layer.pop('layer')))
properties = set(layer.keys()) - set(['_type'])
for prop in sorted(properties, key=state.update_priority, reverse=True):
value = layer.pop(prop)
value = context.object(value)
if isinstance(value, six.string_types) and value == 'fixed':
value = 'Fixed'
if isinstance(value, six.string_types) and value == 'linear':
value = 'Linear'
setattr(state, prop, value)
context.register_object(state_id, state)
layer['state'] = state_id
layer_states.append(state)
list_id = str(uuid.uuid4())
context.register_object(list_id, layer_states)
rec['state']['values']['layers'] = list_id
rec['state']['values']['visible_axes'] = rec['state']['values'].pop('visible_box') | Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place. | Below is the the instruction that describes the task:
### Input:
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
### Response:
def update_viewer_state(rec, context):
"""
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
"""
if '_protocol' not in rec:
rec.pop('properties')
rec['state'] = {}
rec['state']['values'] = rec.pop('options')
layer_states = []
for layer in rec['layers']:
state_id = str(uuid.uuid4())
state_cls = STATE_CLASS[layer['_type'].split('.')[-1]]
state = state_cls(layer=context.object(layer.pop('layer')))
properties = set(layer.keys()) - set(['_type'])
for prop in sorted(properties, key=state.update_priority, reverse=True):
value = layer.pop(prop)
value = context.object(value)
if isinstance(value, six.string_types) and value == 'fixed':
value = 'Fixed'
if isinstance(value, six.string_types) and value == 'linear':
value = 'Linear'
setattr(state, prop, value)
context.register_object(state_id, state)
layer['state'] = state_id
layer_states.append(state)
list_id = str(uuid.uuid4())
context.register_object(list_id, layer_states)
rec['state']['values']['layers'] = list_id
rec['state']['values']['visible_axes'] = rec['state']['values'].pop('visible_box') |
def predict_features(self, df_features, df_target, idx=0, **kwargs):
"""For one variable, predict its neighbouring nodes.
Args:
df_features (pandas.DataFrame):
df_target (pandas.Series):
idx (int): (optional) for printing purposes
kwargs (dict): additional options for algorithms
Returns:
list: scores of each feature relative to the target
.. warning::
Not implemented. Implemented by the algorithms.
"""
y = np.transpose(df_target.values)
X = np.transpose(df_features.values)
path, beta, A, lam = hsiclasso(X, y)
return beta | For one variable, predict its neighbouring nodes.
Args:
df_features (pandas.DataFrame):
df_target (pandas.Series):
idx (int): (optional) for printing purposes
kwargs (dict): additional options for algorithms
Returns:
list: scores of each feature relative to the target
.. warning::
Not implemented. Implemented by the algorithms. | Below is the the instruction that describes the task:
### Input:
For one variable, predict its neighbouring nodes.
Args:
df_features (pandas.DataFrame):
df_target (pandas.Series):
idx (int): (optional) for printing purposes
kwargs (dict): additional options for algorithms
Returns:
list: scores of each feature relative to the target
.. warning::
Not implemented. Implemented by the algorithms.
### Response:
def predict_features(self, df_features, df_target, idx=0, **kwargs):
"""For one variable, predict its neighbouring nodes.
Args:
df_features (pandas.DataFrame):
df_target (pandas.Series):
idx (int): (optional) for printing purposes
kwargs (dict): additional options for algorithms
Returns:
list: scores of each feature relative to the target
.. warning::
Not implemented. Implemented by the algorithms.
"""
y = np.transpose(df_target.values)
X = np.transpose(df_features.values)
path, beta, A, lam = hsiclasso(X, y)
return beta |
def get_partition_function(self):
r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function()
"""
if self.check_model():
factor = self.factors[0]
factor = factor_product(factor, *[self.factors[i] for i in range(1, len(self.factors))])
return np.sum(factor.values) | r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function() | Below is the the instruction that describes the task:
### Input:
r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function()
### Response:
def get_partition_function(self):
r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function()
"""
if self.check_model():
factor = self.factors[0]
factor = factor_product(factor, *[self.factors[i] for i in range(1, len(self.factors))])
return np.sum(factor.values) |
def tsv_pairs_to_dict(line: str, key_lower: bool = True) -> Dict[str, str]:
r"""
Converts a TSV line into sequential key/value pairs as a dictionary.
For example,
.. code-block:: none
field1\tvalue1\tfield2\tvalue2
becomes
.. code-block:: none
{"field1": "value1", "field2": "value2"}
Args:
line: the line
key_lower: should the keys be forced to lower case?
"""
items = line.split("\t")
d = {} # type: Dict[str, str]
for chunk in chunks(items, 2):
if len(chunk) < 2:
log.warning("Bad chunk, not of length 2: {!r}", chunk)
continue
key = chunk[0]
value = unescape_tabs_newlines(chunk[1])
if key_lower:
key = key.lower()
d[key] = value
return d | r"""
Converts a TSV line into sequential key/value pairs as a dictionary.
For example,
.. code-block:: none
field1\tvalue1\tfield2\tvalue2
becomes
.. code-block:: none
{"field1": "value1", "field2": "value2"}
Args:
line: the line
key_lower: should the keys be forced to lower case? | Below is the the instruction that describes the task:
### Input:
r"""
Converts a TSV line into sequential key/value pairs as a dictionary.
For example,
.. code-block:: none
field1\tvalue1\tfield2\tvalue2
becomes
.. code-block:: none
{"field1": "value1", "field2": "value2"}
Args:
line: the line
key_lower: should the keys be forced to lower case?
### Response:
def tsv_pairs_to_dict(line: str, key_lower: bool = True) -> Dict[str, str]:
r"""
Converts a TSV line into sequential key/value pairs as a dictionary.
For example,
.. code-block:: none
field1\tvalue1\tfield2\tvalue2
becomes
.. code-block:: none
{"field1": "value1", "field2": "value2"}
Args:
line: the line
key_lower: should the keys be forced to lower case?
"""
items = line.split("\t")
d = {} # type: Dict[str, str]
for chunk in chunks(items, 2):
if len(chunk) < 2:
log.warning("Bad chunk, not of length 2: {!r}", chunk)
continue
key = chunk[0]
value = unescape_tabs_newlines(chunk[1])
if key_lower:
key = key.lower()
d[key] = value
return d |
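A standalone sketch of the pairing logic in the record above, written without the library's chunks() and unescape_tabs_newlines() helpers; because those helpers are omitted, escaped tabs and newlines inside values are not handled here.

line = "Field1\tvalue1\tField2\tvalue2"
items = line.split("\t")
pairs = {items[i].lower(): items[i + 1] for i in range(0, len(items) - 1, 2)}
print(pairs)  # {'field1': 'value1', 'field2': 'value2'}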
def read_stdin():
""" Read text from stdin, and print a helpful message for ttys. """
if sys.stdin.isatty() and sys.stdout.isatty():
print('\nReading from stdin until end of file (Ctrl + D)...')
return sys.stdin.read() | Read text from stdin, and print a helpful message for ttys. | Below is the the instruction that describes the task:
### Input:
Read text from stdin, and print a helpful message for ttys.
### Response:
def read_stdin():
""" Read text from stdin, and print a helpful message for ttys. """
if sys.stdin.isatty() and sys.stdout.isatty():
print('\nReading from stdin until end of file (Ctrl + D)...')
return sys.stdin.read() |
def shell(ctx):
"""
open an engineer shell
"""
shell = code.InteractiveConsole({"engineer": getattr(ctx.parent, "widget", None)})
shell.interact("\n".join([
"Engineer connected to %s" % ctx.parent.params["host"],
"Dispatch available through the 'engineer' object"
])) | open an engineer shell | Below is the the instruction that describes the task:
### Input:
open an engineer shell
### Response:
def shell(ctx):
"""
open an engineer shell
"""
shell = code.InteractiveConsole({"engineer": getattr(ctx.parent, "widget", None)})
shell.interact("\n".join([
"Engineer connected to %s" % ctx.parent.params["host"],
"Dispatch available through the 'engineer' object"
])) |
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
) | Prints the completion code of the given shell | Below is the the instruction that describes the task:
### Input:
Prints the completion code of the given shell
### Response:
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
) |
def get_stats(self, nid=None):
"""Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class
"""
r = self.request(
api_type="main",
method="network.get_stats",
nid=nid,
)
return self._handle_error(r, "Could not retrieve stats for class.") | Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class | Below is the the instruction that describes the task:
### Input:
Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class
### Response:
def get_stats(self, nid=None):
"""Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class
"""
r = self.request(
api_type="main",
method="network.get_stats",
nid=nid,
)
return self._handle_error(r, "Could not retrieve stats for class.") |
def on(cls, event, handler_func=None):
"""
Registers a handler function whenever an instance of the model
emits the given event.
This method can either be called directly, passing a function reference:
MyModel.on('did_save', my_function)
...or as a decorator of the function to be registered.
@MyModel.on('did_save')
def myfunction(my_model):
pass
"""
if handler_func:
cls.handler_registrar().register(event, handler_func)
return
def register(fn):
cls.handler_registrar().register(event, fn)
return fn
return register | Registers a handler function whenever an instance of the model
emits the given event.
This method can either be called directly, passing a function reference:
MyModel.on('did_save', my_function)
...or as a decorator of the function to be registered.
@MyModel.on('did_save')
def myfunction(my_model):
pass | Below is the the instruction that describes the task:
### Input:
Registers a handler function whenever an instance of the model
emits the given event.
This method can either be called directly, passing a function reference:
MyModel.on('did_save', my_function)
...or as a decorator of the function to be registered.
@MyModel.on('did_save')
def myfunction(my_model):
pass
### Response:
def on(cls, event, handler_func=None):
"""
Registers a handler function whenever an instance of the model
emits the given event.
This method can either be called directly, passing a function reference:
MyModel.on('did_save', my_function)
...or as a decorator of the function to be registered.
@MyModel.on('did_save')
def myfunction(my_model):
pass
"""
if handler_func:
cls.handler_registrar().register(event, handler_func)
return
def register(fn):
cls.handler_registrar().register(event, fn)
return fn
return register |
def plotConvergenceByColumnTopology(results, columnRange, featureRange, networkType, numTrials):
"""
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f, c, t] = how long it took it to converge with f unique
# features, c columns and topology t.
convergence = numpy.zeros((max(featureRange), max(columnRange) + 1, len(networkType)))
networkTypeNames = {}
for i, topologyType in enumerate(networkType):
if "Topology" in topologyType:
networkTypeNames[i] = "Normal"
else:
networkTypeNames[i] = "Dense"
for r in results:
convergence[r["numFeatures"] - 1, r["numColumns"], networkType.index(r["networkType"])] += r["convergencePoint"]
convergence /= numTrials
# For each column, print convergence as fct of number of unique features
for c in range(1, max(columnRange) + 1):
for t in range(len(networkType)):
print c, convergence[:, c, t]
# Print everything anyway for debugging
print "Average convergence array=", convergence
########################################################################
#
# Create the plot. x-axis=
plt.figure()
plotPath = os.path.join("plots", "convergence_by_column_topology.pdf")
# Plot each curve
legendList = []
colormap = plt.get_cmap("jet")
colorList = [colormap(x) for x in numpy.linspace(0., 1.,
len(featureRange)*len(networkType))]
for i in range(len(featureRange)):
for t in range(len(networkType)):
f = featureRange[i]
print columnRange
print convergence[f-1,columnRange, t]
legendList.append('Unique features={}, topology={}'.format(f, networkTypeNames[t]))
plt.plot(columnRange, convergence[f-1,columnRange, t],
color=colorList[i*len(networkType) + t])
# format
plt.legend(legendList, loc="upper right")
plt.xlabel("Number of columns")
plt.xticks(columnRange)
plt.yticks(range(0,int(convergence.max())+1))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (multiple columns)")
# save
plt.savefig(plotPath)
plt.close() | Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features. | Below is the the instruction that describes the task:
### Input:
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
### Response:
def plotConvergenceByColumnTopology(results, columnRange, featureRange, networkType, numTrials):
"""
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f, c, t] = how long it took it to converge with f unique
# features, c columns and topology t.
convergence = numpy.zeros((max(featureRange), max(columnRange) + 1, len(networkType)))
networkTypeNames = {}
for i, topologyType in enumerate(networkType):
if "Topology" in topologyType:
networkTypeNames[i] = "Normal"
else:
networkTypeNames[i] = "Dense"
for r in results:
convergence[r["numFeatures"] - 1, r["numColumns"], networkType.index(r["networkType"])] += r["convergencePoint"]
convergence /= numTrials
# For each column, print convergence as fct of number of unique features
for c in range(1, max(columnRange) + 1):
for t in range(len(networkType)):
print c, convergence[:, c, t]
# Print everything anyway for debugging
print "Average convergence array=", convergence
########################################################################
#
# Create the plot. x-axis=
plt.figure()
plotPath = os.path.join("plots", "convergence_by_column_topology.pdf")
# Plot each curve
legendList = []
colormap = plt.get_cmap("jet")
colorList = [colormap(x) for x in numpy.linspace(0., 1.,
len(featureRange)*len(networkType))]
for i in range(len(featureRange)):
for t in range(len(networkType)):
f = featureRange[i]
print columnRange
print convergence[f-1,columnRange, t]
legendList.append('Unique features={}, topology={}'.format(f, networkTypeNames[t]))
plt.plot(columnRange, convergence[f-1,columnRange, t],
color=colorList[i*len(networkType) + t])
# format
plt.legend(legendList, loc="upper right")
plt.xlabel("Number of columns")
plt.xticks(columnRange)
plt.yticks(range(0,int(convergence.max())+1))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (multiple columns)")
# save
plt.savefig(plotPath)
plt.close() |
def _encryption_context_hash(hasher, encryption_context):
"""Generates the expected hash for the provided encryption context.
:param hasher: Existing hasher to use
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param dict encryption_context: Encryption context to hash
:returns: Complete hash
:rtype: bytes
"""
serialized_encryption_context = serialize_encryption_context(encryption_context)
hasher.update(serialized_encryption_context)
return hasher.finalize() | Generates the expected hash for the provided encryption context.
:param hasher: Existing hasher to use
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param dict encryption_context: Encryption context to hash
:returns: Complete hash
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Generates the expected hash for the provided encryption context.
:param hasher: Existing hasher to use
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param dict encryption_context: Encryption context to hash
:returns: Complete hash
:rtype: bytes
### Response:
def _encryption_context_hash(hasher, encryption_context):
"""Generates the expected hash for the provided encryption context.
:param hasher: Existing hasher to use
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param dict encryption_context: Encryption context to hash
:returns: Complete hash
:rtype: bytes
"""
serialized_encryption_context = serialize_encryption_context(encryption_context)
hasher.update(serialized_encryption_context)
return hasher.finalize() |
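The helper above expects a ready-made hasher plus a serialize_encryption_context() function from the surrounding SDK, which is not reproduced in this row. The sketch below only shows how such a hasher is built and finalized with the cryptography library; the update() bytes are a placeholder, not a real serialized encryption context.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes

hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
hasher.update(b"serialized-encryption-context-bytes")  # placeholder input
print(hasher.finalize().hex())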
def ubnd(self):
""" the upper bound vector while respecting log transform
Returns
-------
ubnd : pandas.Series
"""
if not self.istransformed:
return self.pst.parameter_data.parubnd.copy()
else:
ub = self.pst.parameter_data.parubnd.copy()
ub[self.log_indexer] = np.log10(ub[self.log_indexer])
return ub | the upper bound vector while respecting log transform
Returns
-------
ubnd : pandas.Series | Below is the the instruction that describes the task:
### Input:
the upper bound vector while respecting log transform
Returns
-------
ubnd : pandas.Series
### Response:
def ubnd(self):
""" the upper bound vector while respecting log transform
Returns
-------
ubnd : pandas.Series
"""
if not self.istransformed:
return self.pst.parameter_data.parubnd.copy()
else:
ub = self.pst.parameter_data.parubnd.copy()
ub[self.log_indexer] = np.log10(ub[self.log_indexer])
return ub |