code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def read(self, dataframe_name: str) -> pandas.DataFrame:
"""Evaluate and retrieve a Spark dataframe in the managed session.
:param dataframe_name: The name of the Spark dataframe to read.
"""
code = serialise_dataframe_code(dataframe_name, self.kind)
output = self._execute(code)
output.raise_for_status()
if output.text is None:
raise RuntimeError("statement had no text output")
return deserialise_dataframe(output.text) | Evaluate and retrieve a Spark dataframe in the managed session.
:param dataframe_name: The name of the Spark dataframe to read. | Below is the instruction that describes the task:
### Input:
Evaluate and retrieve a Spark dataframe in the managed session.
:param dataframe_name: The name of the Spark dataframe to read.
### Response:
def read(self, dataframe_name: str) -> pandas.DataFrame:
"""Evaluate and retrieve a Spark dataframe in the managed session.
:param dataframe_name: The name of the Spark dataframe to read.
"""
code = serialise_dataframe_code(dataframe_name, self.kind)
output = self._execute(code)
output.raise_for_status()
if output.text is None:
raise RuntimeError("statement had no text output")
return deserialise_dataframe(output.text) |
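The helpers serialise_dataframe_code and deserialise_dataframe referenced above are not shown in this row. A minimal sketch of how such a pair could work, assuming a pyspark session kind and a CSV round trip over the statement's text output (both the behaviour of these names and the transport are assumptions, not the library's actual API):
import io
import pandas
def serialise_dataframe_code(dataframe_name: str, kind: str) -> str:
    # Code string executed inside the remote Spark session; `kind` is ignored
    # in this sketch, though the real helper presumably branches on it.
    return "print({df}.toPandas().to_csv(index=False))".format(df=dataframe_name)
def deserialise_dataframe(text: str) -> pandas.DataFrame:
    # Parse the CSV captured from the statement's text output back into pandas.
    return pandas.read_csv(io.StringIO(text))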
def UpdateColums(self, cursor, field, FieldType, model, columns, UNCHANGED):
'''Updates the columns. Don't call directly
'''
table = model.__name__.lower()
if field not in columns:
n = UNCHANGED.pop()
new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}"
cursor.execute(new_sql)
print("\n\n", new_sql)
else:
UNCHANGED.append(field)
# We drop the fields in the table not in models
TCOLS = set(columns)-set(model._fields)
for col in TCOLS:
columns.remove(col)
QRY = f"ALTER TABLE {table} DROP COLUMN {col}"
cursor.execute(QRY)
print("\n\n", QRY)
return True | Updates the columns. Don't call directly | Below is the instruction that describes the task:
### Input:
Updates the columns. Don't call directly
### Response:
def UpdateColums(self, cursor, field, FieldType, model, columns, UNCHANGED):
'''Updates the columns. Don't call directly
'''
table = model.__name__.lower()
if field not in columns:
n = UNCHANGED.pop()
new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}"
cursor.execute(new_sql)
print("\n\n", new_sql)
else:
UNCHANGED.append(field)
# We drop the fields in the table not in models
TCOLS = set(columns)-set(model._fields)
for col in TCOLS:
columns.remove(col)
QRY = f"ALTER TABLE {table} DROP COLUMN {col}"
cursor.execute(QRY)
print("\n\n", QRY)
return True |
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = ?'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = salt.utils.json.loads(full_ret)
_close_conn(conn)
return ret | Return the information returned when the specified job id was executed | Below is the instruction that describes the task:
### Input:
Return the information returned when the specified job id was executed
### Response:
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = ?'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = salt.utils.json.loads(full_ret)
_close_conn(conn)
return ret |
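The pattern above (a parameterized SELECT keyed on the jid, then JSON-decoding each minion's return) can be exercised standalone against an in-memory SQLite database; the table layout mirrors the query, and the data is made up:
import json
import sqlite3
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE salt_returns (id TEXT, jid TEXT, full_ret TEXT)")
conn.execute("INSERT INTO salt_returns VALUES (?, ?, ?)",
             ("minion1", "20190101000000000000", json.dumps({"retcode": 0})))
cur = conn.cursor()
cur.execute("SELECT id, full_ret FROM salt_returns WHERE jid = ?",
            ("20190101000000000000",))
ret = {minion: json.loads(full_ret) for minion, full_ret in cur.fetchall()}
print(ret)  # {'minion1': {'retcode': 0}}
conn.close()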
def _wrap_paginated_response(cls, request, response, controls, data,
head=None):
"""Builds the metadata for a pagingated response and wraps everying in
a JSON encoded web.Response
"""
paging_response = response['paging']
if head is None:
head = response['head_id']
link = cls._build_url(
request,
head=head,
start=paging_response['start'],
limit=paging_response['limit'])
paging = {}
limit = controls.get('limit')
start = controls.get("start")
paging["limit"] = limit
paging["start"] = start
# If there are no resources, there should be nothing else in paging
if paging_response.get("next") == "":
return cls._wrap_response(
request,
data=data,
metadata={
'head': head,
'link': link,
'paging': paging
})
next_id = paging_response['next']
paging['next_position'] = next_id
# Builds paging urls specific to this response
def build_pg_url(start=None):
return cls._build_url(request, head=head, limit=limit, start=start)
paging['next'] = build_pg_url(paging_response['next'])
return cls._wrap_response(
request,
data=data,
metadata={
'head': head,
'link': link,
'paging': paging
}) | Builds the metadata for a paginated response and wraps everything in
a JSON encoded web.Response | Below is the instruction that describes the task:
### Input:
Builds the metadata for a paginated response and wraps everything in
a JSON encoded web.Response
### Response:
def _wrap_paginated_response(cls, request, response, controls, data,
head=None):
"""Builds the metadata for a pagingated response and wraps everying in
a JSON encoded web.Response
"""
paging_response = response['paging']
if head is None:
head = response['head_id']
link = cls._build_url(
request,
head=head,
start=paging_response['start'],
limit=paging_response['limit'])
paging = {}
limit = controls.get('limit')
start = controls.get("start")
paging["limit"] = limit
paging["start"] = start
# If there are no resources, there should be nothing else in paging
if paging_response.get("next") == "":
return cls._wrap_response(
request,
data=data,
metadata={
'head': head,
'link': link,
'paging': paging
})
next_id = paging_response['next']
paging['next_position'] = next_id
# Builds paging urls specific to this response
def build_pg_url(start=None):
return cls._build_url(request, head=head, limit=limit, start=start)
paging['next'] = build_pg_url(paging_response['next'])
return cls._wrap_response(
request,
data=data,
metadata={
'head': head,
'link': link,
'paging': paging
}) |
def login(request):
"""
Log in
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
password:
user's password
"""
if request.method == 'GET':
return render(request, 'user_login.html', {}, help_text=login.__doc__)
elif request.method == 'POST':
credentials = json_body(request.body.decode("utf-8"))
user = auth.authenticate(
username=credentials.get('username', ''),
password=credentials.get('password', ''),
)
if user is None:
return render_json(request, {
'error': _('Password or username does not match.'),
'error_type': 'password_username_not_match'
}, template='user_json.html', status=401)
if not user.is_active:
return render_json(request, {
'error': _('The account has not been activated.'),
'error_type': 'account_not_activated'
}, template='user_json.html', status=401)
auth.login(request, user)
request.method = "GET"
return profile(request)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | Log in
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
password:
user's password | Below is the instruction that describes the task:
### Input:
Log in
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
password:
user's password
### Response:
def login(request):
"""
Log in
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
password:
user's password
"""
if request.method == 'GET':
return render(request, 'user_login.html', {}, help_text=login.__doc__)
elif request.method == 'POST':
credentials = json_body(request.body.decode("utf-8"))
user = auth.authenticate(
username=credentials.get('username', ''),
password=credentials.get('password', ''),
)
if user is None:
return render_json(request, {
'error': _('Password or username does not match.'),
'error_type': 'password_username_not_match'
}, template='user_json.html', status=401)
if not user.is_active:
return render_json(request, {
'error': _('The account has not been activated.'),
'error_type': 'account_not_activated'
}, template='user_json.html', status=401)
auth.login(request, user)
request.method = "GET"
return profile(request)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) |
def _make_key(func,
args,
kwds,
typed,
kwd_mark=(object(), ),
fasttypes={int, str, frozenset, type(None)},
sorted=sorted,
tuple=tuple,
type=type,
len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
key += kwd_mark
key += ('__func__', func)
if kwds:
sorted_items = sorted(kwds.items())
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key) | Make a cache key from optionally typed positional and keyword arguments | Below is the instruction that describes the task:
### Input:
Make a cache key from optionally typed positional and keyword arguments
### Response:
def _make_key(func,
args,
kwds,
typed,
kwd_mark=(object(), ),
fasttypes={int, str, frozenset, type(None)},
sorted=sorted,
tuple=tuple,
type=type,
len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
key += kwd_mark
key += ('__func__', func)
if kwds:
sorted_items = sorted(kwds.items())
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key) |
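To make the flattening concrete, here is a small standalone illustration (without the _HashedSeq wrapper or the single-fast-type shortcut) of the key tuple produced for a call with positional and keyword arguments; the marker object and sample function are illustrative only:
kwd_mark = (object(),)
def flatten_key(func, args, kwds, typed=False):
    key = args + kwd_mark + ('__func__', func)
    sorted_items = sorted(kwds.items())
    for item in sorted_items:
        key += item  # each (name, value) pair is appended flat
    if typed:
        key += tuple(type(v) for v in args)
        key += tuple(type(v) for k, v in sorted_items)
    return key
def sample(x, y, a=0):
    return x + y + a
print(flatten_key(sample, (1, 2), {'a': 3}))
# (1, 2, <object ...>, '__func__', <function sample ...>, 'a', 3)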
def get_runtime(self):
""":class:`timedelta` with the run-time, None if the Task is not running"""
if self.start is None: return None
if self.end is None:
delta = datetime.datetime.now() - self.start
else:
delta = self.end - self.start
return MyTimedelta.as_timedelta(delta) | :class:`timedelta` with the run-time, None if the Task is not running | Below is the instruction that describes the task:
### Input:
:class:`timedelta` with the run-time, None if the Task is not running
### Response:
def get_runtime(self):
""":class:`timedelta` with the run-time, None if the Task is not running"""
if self.start is None: return None
if self.end is None:
delta = datetime.datetime.now() - self.start
else:
delta = self.end - self.start
return MyTimedelta.as_timedelta(delta) |
def time(self, intervals=1, *args, _show_progress=True, _print=True,
_collect_garbage=True, _quiet=True, **kwargs):
""" Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing
"""
self.reset()
args = list(args) + list(self._callableargs[0])
_kwargs = self._callableargs[1]
_kwargs.update(kwargs)
kwargs = _kwargs
if not _collect_garbage:
gc.disable() # Garbage collection setting
gc.collect()
self.allocated_memory = 0
for x in self.progress(intervals):
if _quiet: # Quiets print()s in the tested function
sys.stdout = NullIO()
try:
self.start() # Starts the timer
self._callable(*args, **kwargs)
self.stop() # Stops the timer
except Exception as e:
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__
raise e
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__
if not _collect_garbage:
gc.enable() # Garbage collection setting
if _print:
self.info() | Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing | Below is the instruction that describes the task:
### Input:
Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing
### Response:
def time(self, intervals=1, *args, _show_progress=True, _print=True,
_collect_garbage=True, _quiet=True, **kwargs):
""" Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing
"""
self.reset()
args = list(args) + list(self._callableargs[0])
_kwargs = self._callableargs[1]
_kwargs.update(kwargs)
kwargs = _kwargs
if not _collect_garbage:
gc.disable() # Garbage collection setting
gc.collect()
self.allocated_memory = 0
for x in self.progress(intervals):
if _quiet: # Quiets print()s in the tested function
sys.stdout = NullIO()
try:
self.start() # Starts the timer
self._callable(*args, **kwargs)
self.stop() # Stops the timer
except Exception as e:
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__
raise e
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__
if not _collect_garbage:
gc.enable() # Garbage collection setting
if _print:
self.info() |
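The core discipline of the timing loop above (optionally disable garbage collection, silence print() inside the timed callable, record per-interval wall-clock times) can be sketched standalone with time.perf_counter; this is a simplified stand-in, not the class shown in the row:
import gc
import io
import sys
import time
def time_callable(fn, intervals=100, collect_garbage=False, quiet=True):
    if not collect_garbage:
        gc.disable()
        gc.collect()
    timings = []
    try:
        for _ in range(intervals):
            if quiet:
                sys.stdout = io.StringIO()  # swallow prints from fn
            try:
                start = time.perf_counter()
                fn()
                timings.append(time.perf_counter() - start)
            finally:
                if quiet:
                    sys.stdout = sys.__stdout__
    finally:
        if not collect_garbage:
            gc.enable()
    return min(timings), sum(timings) / len(timings)
print(time_callable(lambda: sum(range(10000))))  # (best, mean) in seconds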
def is_enabled(name):
'''
List a Job only if it's enabled
.. versionadded:: 2015.5.3
CLI Example:
.. code-block:: bash
salt '*' schedule.is_enabled name=job_name
'''
current_schedule = __salt__['schedule.list'](show_all=False, return_yaml=False)
if name in current_schedule:
return current_schedule[name]
else:
return {} | List a Job only if it's enabled
.. versionadded:: 2015.5.3
CLI Example:
.. code-block:: bash
salt '*' schedule.is_enabled name=job_name | Below is the instruction that describes the task:
### Input:
List a Job only if it's enabled
.. versionadded:: 2015.5.3
CLI Example:
.. code-block:: bash
salt '*' schedule.is_enabled name=job_name
### Response:
def is_enabled(name):
'''
List a Job only if it's enabled
.. versionadded:: 2015.5.3
CLI Example:
.. code-block:: bash
salt '*' schedule.is_enabled name=job_name
'''
current_schedule = __salt__['schedule.list'](show_all=False, return_yaml=False)
if name in current_schedule:
return current_schedule[name]
else:
return {} |
def free_parameters(self):
"""
Returns a dictionary of free parameters for this function
:return: dictionary of free parameters
"""
free_parameters = collections.OrderedDict([(k,v) for k, v in self.parameters.iteritems() if v.free])
return free_parameters | Returns a dictionary of free parameters for this function
:return: dictionary of free parameters | Below is the instruction that describes the task:
### Input:
Returns a dictionary of free parameters for this function
:return: dictionary of free parameters
### Response:
def free_parameters(self):
"""
Returns a dictionary of free parameters for this function
:return: dictionary of free parameters
"""
free_parameters = collections.OrderedDict([(k,v) for k, v in self.parameters.iteritems() if v.free])
return free_parameters |
def address_offset(self):
"""
Byte address offset of this node relative to it's parent
If this node is an array, it's index must be known
Raises
------
ValueError
If this property is referenced on a node whose array index is not
fully defined
"""
if self.inst.is_array:
if self.current_idx is None:
raise ValueError("Index of array element must be known to derive address")
# Calculate the "flattened" index of a general multidimensional array
# For example, a component array declared as:
# foo[S0][S1][S2]
# and referenced as:
# foo[I0][I1][I2]
# Is flattened like this:
# idx = I0*S1*S2 + I1*S2 + I2
idx = 0
for i in range(len(self.current_idx)):
sz = 1
for j in range(i+1, len(self.inst.array_dimensions)):
sz *= self.inst.array_dimensions[j]
idx += sz * self.current_idx[i]
offset = self.inst.addr_offset + idx * self.inst.array_stride
else:
offset = self.inst.addr_offset
return offset | Byte address offset of this node relative to its parent
If this node is an array, its index must be known
Raises
------
ValueError
If this property is referenced on a node whose array index is not
fully defined | Below is the instruction that describes the task:
### Input:
Byte address offset of this node relative to its parent
If this node is an array, its index must be known
Raises
------
ValueError
If this property is referenced on a node whose array index is not
fully defined
### Response:
def address_offset(self):
"""
Byte address offset of this node relative to its parent
If this node is an array, its index must be known
Raises
------
ValueError
If this property is referenced on a node whose array index is not
fully defined
"""
if self.inst.is_array:
if self.current_idx is None:
raise ValueError("Index of array element must be known to derive address")
# Calculate the "flattened" index of a general multidimensional array
# For example, a component array declared as:
# foo[S0][S1][S2]
# and referenced as:
# foo[I0][I1][I2]
# Is flattened like this:
# idx = I0*S1*S2 + I1*S2 + I2
idx = 0
for i in range(len(self.current_idx)):
sz = 1
for j in range(i+1, len(self.inst.array_dimensions)):
sz *= self.inst.array_dimensions[j]
idx += sz * self.current_idx[i]
offset = self.inst.addr_offset + idx * self.inst.array_stride
else:
offset = self.inst.addr_offset
return offset |
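A worked example of the row-major flattening used above: for dimensions [S0, S1, S2] and index [I0, I1, I2] the flattened index is I0*S1*S2 + I1*S2 + I2.
def flatten_index(current_idx, array_dimensions):
    idx = 0
    for i in range(len(current_idx)):
        sz = 1
        for j in range(i + 1, len(array_dimensions)):
            sz *= array_dimensions[j]
        idx += sz * current_idx[i]
    return idx
# foo[2][5][7] referenced as foo[1][3][4] -> 1*5*7 + 3*7 + 4 = 60
print(flatten_index([1, 3, 4], [2, 5, 7]))  # 60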
def copy_action_callback(self, *event):
"""Add a copy of all selected row dict value pairs to the clipboard"""
if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
_, dict_paths = self.get_view_selection()
selected_data_list = []
for dict_path_as_list in dict_paths:
value = self.model.state.semantic_data
for path_element in dict_path_as_list:
value = value[path_element]
selected_data_list.append((path_element, value))
rafcon.gui.clipboard.global_clipboard.set_semantic_dictionary_list(selected_data_list) | Add a copy of all selected row dict value pairs to the clipboard | Below is the instruction that describes the task:
### Input:
Add a copy of all selected row dict value pairs to the clipboard
### Response:
def copy_action_callback(self, *event):
"""Add a copy of all selected row dict value pairs to the clipboard"""
if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
_, dict_paths = self.get_view_selection()
selected_data_list = []
for dict_path_as_list in dict_paths:
value = self.model.state.semantic_data
for path_element in dict_path_as_list:
value = value[path_element]
selected_data_list.append((path_element, value))
rafcon.gui.clipboard.global_clipboard.set_semantic_dictionary_list(selected_data_list) |
def train(nr_iter, examples):
'''Return an averaged perceptron model trained on ``examples`` for
``nr_iter`` iterations.
'''
model = AveragedPerceptron()
for i in range(nr_iter):
random.shuffle(examples)
for features, class_ in examples:
scores = model.predict(features)
guess, score = max(scores.items(), key=lambda i: i[1])
if guess != class_:
model.update(class_, guess, features)
model.average_weights()
return model | Return an averaged perceptron model trained on ``examples`` for
``nr_iter`` iterations. | Below is the instruction that describes the task:
### Input:
Return an averaged perceptron model trained on ``examples`` for
``nr_iter`` iterations.
### Response:
def train(nr_iter, examples):
'''Return an averaged perceptron model trained on ``examples`` for
``nr_iter`` iterations.
'''
model = AveragedPerceptron()
for i in range(nr_iter):
random.shuffle(examples)
for features, class_ in examples:
scores = model.predict(features)
guess, score = max(scores.items(), key=lambda i: i[1])
if guess != class_:
model.update(class_, guess, features)
model.average_weights()
return model |
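A self-contained toy version of the same loop (shuffle, score every class, update the true and guessed classes on mistakes) for dict-valued features; weight averaging is omitted, and this is a sketch of the idea rather than the AveragedPerceptron used above:
import random
from collections import defaultdict
def toy_train(nr_iter, examples, classes=('pos', 'neg')):
    weights = {c: defaultdict(float) for c in classes}
    for _ in range(nr_iter):
        random.shuffle(examples)
        for features, class_ in examples:
            scores = {c: sum(w[f] * v for f, v in features.items())
                      for c, w in weights.items()}
            guess, _score = max(scores.items(), key=lambda i: i[1])
            if guess != class_:
                for f, v in features.items():
                    weights[class_][f] += v
                    weights[guess][f] -= v
    return weights
examples = [({'good': 1}, 'pos'), ({'bad': 1}, 'neg'),
            ({'great': 1}, 'pos'), ({'awful': 1}, 'neg')]
model = toy_train(5, examples)
print(model['pos']['good'] >= model['neg']['good'])  # True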
def checkplot_pickle_update(
currentcp,
updatedcp,
outfile=None,
outgzip=False,
pickleprotocol=None,
verbose=True
):
'''This updates the current checkplotdict with updated values provided.
Parameters
----------
currentcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be updated from the `updatedcp` checkplot.
updatedcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be the source of the update to the `currentcp` checkplot.
outfile : str or None
The name of the output checkplot pickle file. The function will output
the new checkplot gzipped pickle file to `outfile` if outfile is a
filename. If `currentcp` is a filename and `outfile` is None, the output path
will be set to that filename, so the function updates it in place.
outgzip : bool
This controls whether to gzip the output pickle. It turns out that this
is the slowest bit in the output process, so if you're after speed, best
not to use this. This is False by default since it turns out that gzip
actually doesn't save that much space (29 MB vs. 35 MB for the average
checkplot pickle).
pickleprotocol : int or None
This sets the pickle file protocol to use when writing the pickle:
If None, will choose a protocol using the following rules:
- 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
- 3 -> default in Python 3.0-3.3 - mildly fast
- 2 -> default in Python 2 - very slow, but compatible with Python 2/3
The default protocol kwarg is None, this will make an automatic choice
for pickle protocol that's best suited for the version of Python in
use. Note that this will make pickles generated by Py3 incompatible with
Py2.
verbose : bool
If True, will indicate progress and warn about problems.
Returns
-------
str
The path to the updated checkplot pickle file. If `outfile` was None and
`currentcp` was a filename, this will return `currentcp` to indicate
that the checkplot pickle file was updated in place.
'''
# break out python 2.7 and > 3 nonsense
if sys.version_info[:2] > (3,2):
# generate the outfile filename
if not outfile and isinstance(currentcp,str):
plotfpath = currentcp
elif outfile:
plotfpath = outfile
elif isinstance(currentcp, dict) and currentcp['objectid']:
if outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
else:
plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
else:
# we'll get this later below
plotfpath = None
if (isinstance(currentcp, str) and os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp, dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
if (isinstance(updatedcp, str) and os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# check for unicode in python 2.7
else:
# generate the outfile filename
if (not outfile and
(isinstance(currentcp, str) or isinstance(currentcp, unicode))):
plotfpath = currentcp
elif outfile:
plotfpath = outfile
elif isinstance(currentcp, dict) and currentcp['objectid']:
if outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
else:
plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
else:
# we'll get this later below
plotfpath = None
# get the current checkplotdict
if ((isinstance(currentcp, str) or isinstance(currentcp, unicode)) and
os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp,dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
# get the updated checkplotdict
if ((isinstance(updatedcp, str) or isinstance(updatedcp, unicode)) and
os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# do the update using python's dict update mechanism
# this requires updated to be in the same checkplotdict format as current
# all keys in current will now be from updated
cp_current.update(cp_updated)
# figure out the plotfpath if we haven't by now
if not plotfpath and outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % cp_current['objectid']
elif (not plotfpath) and (not outgzip):
plotfpath = 'checkplot-%s.pkl' % cp_current['objectid']
# make sure we write the correct postfix
if plotfpath.endswith('.gz'):
outgzip = True
# write the new checkplotdict
return _write_checkplot_picklefile(cp_current,
outfile=plotfpath,
outgzip=outgzip,
protocol=pickleprotocol) | This updates the current checkplotdict with updated values provided.
Parameters
----------
currentcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be updated from the `updatedcp` checkplot.
updatedcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be the source of the update to the `currentcp` checkplot.
outfile : str or None
The name of the output checkplot pickle file. The function will output
the new checkplot gzipped pickle file to `outfile` if outfile is a
filename. If `currentcp` is a filename and `outfile` is None, the output path
will be set to that filename, so the function updates it in place.
outgzip : bool
This controls whether to gzip the output pickle. It turns out that this
is the slowest bit in the output process, so if you're after speed, best
not to use this. This is False by default since it turns out that gzip
actually doesn't save that much space (29 MB vs. 35 MB for the average
checkplot pickle).
pickleprotocol : int or None
This sets the pickle file protocol to use when writing the pickle:
If None, will choose a protocol using the following rules:
- 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
- 3 -> default in Python 3.0-3.3 - mildly fast
- 2 -> default in Python 2 - very slow, but compatible with Python 2/3
The default protocol kwarg is None, this will make an automatic choice
for pickle protocol that's best suited for the version of Python in
use. Note that this will make pickles generated by Py3 incompatible with
Py2.
verbose : bool
If True, will indicate progress and warn about problems.
Returns
-------
str
The path to the updated checkplot pickle file. If `outfile` was None and
`currentcp` was a filename, this will return `currentcp` to indicate
that the checkplot pickle file was updated in place. | Below is the instruction that describes the task:
### Input:
This updates the current checkplotdict with updated values provided.
Parameters
----------
currentcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be updated from the `updatedcp` checkplot.
updatedcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be the source of the update to the `currentcp` checkplot.
outfile : str or None
The name of the output checkplot pickle file. The function will output
the new checkplot gzipped pickle file to `outfile` if outfile is a
filename. If `currentcp` is a filename and `outfile` is None, the output path
will be set to that filename, so the function updates it in place.
outgzip : bool
This controls whether to gzip the output pickle. It turns out that this
is the slowest bit in the output process, so if you're after speed, best
not to use this. This is False by default since it turns out that gzip
actually doesn't save that much space (29 MB vs. 35 MB for the average
checkplot pickle).
pickleprotocol : int or None
This sets the pickle file protocol to use when writing the pickle:
If None, will choose a protocol using the following rules:
- 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
- 3 -> default in Python 3.0-3.3 - mildly fast
- 2 -> default in Python 2 - very slow, but compatible with Python 2/3
The default protocol kwarg is None, this will make an automatic choice
for pickle protocol that's best suited for the version of Python in
use. Note that this will make pickles generated by Py3 incompatible with
Py2.
verbose : bool
If True, will indicate progress and warn about problems.
Returns
-------
str
The path to the updated checkplot pickle file. If `outfile` was None and
`currentcp` was a filename, this will return `currentcp` to indicate
that the checkplot pickle file was updated in place.
### Response:
def checkplot_pickle_update(
currentcp,
updatedcp,
outfile=None,
outgzip=False,
pickleprotocol=None,
verbose=True
):
'''This updates the current checkplotdict with updated values provided.
Parameters
----------
currentcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be updated from the `updatedcp` checkplot.
updatedcp : dict or str
This is either a checkplotdict produced by `checkplot_pickle` above or a
checkplot pickle file produced by the same function. This checkplot will
be the source of the update to the `currentcp` checkplot.
outfile : str or None
The name of the output checkplot pickle file. The function will output
the new checkplot gzipped pickle file to `outfile` if outfile is a
filename. If `currentcp` is a filename and `outfile` is None, the output path
will be set to that filename, so the function updates it in place.
outgzip : bool
This controls whether to gzip the output pickle. It turns out that this
is the slowest bit in the output process, so if you're after speed, best
not to use this. This is False by default since it turns out that gzip
actually doesn't save that much space (29 MB vs. 35 MB for the average
checkplot pickle).
pickleprotocol : int or None
This sets the pickle file protocol to use when writing the pickle:
If None, will choose a protocol using the following rules:
- 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
- 3 -> default in Python 3.0-3.3 - mildly fast
- 2 -> default in Python 2 - very slow, but compatible with Python 2/3
The default protocol kwarg is None, this will make an automatic choice
for pickle protocol that's best suited for the version of Python in
use. Note that this will make pickles generated by Py3 incompatible with
Py2.
verbose : bool
If True, will indicate progress and warn about problems.
Returns
-------
str
The path to the updated checkplot pickle file. If `outfile` was None and
`currentcp` was a filename, this will return `currentcp` to indicate
that the checkplot pickle file was updated in place.
'''
# break out python 2.7 and > 3 nonsense
if sys.version_info[:2] > (3,2):
# generate the outfile filename
if not outfile and isinstance(currentcp,str):
plotfpath = currentcp
elif outfile:
plotfpath = outfile
elif isinstance(currentcp, dict) and currentcp['objectid']:
if outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
else:
plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
else:
# we'll get this later below
plotfpath = None
if (isinstance(currentcp, str) and os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp, dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
if (isinstance(updatedcp, str) and os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# check for unicode in python 2.7
else:
# generate the outfile filename
if (not outfile and
(isinstance(currentcp, str) or isinstance(currentcp, unicode))):
plotfpath = currentcp
elif outfile:
plotfpath = outfile
elif isinstance(currentcp, dict) and currentcp['objectid']:
if outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
else:
plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
else:
# we'll get this later below
plotfpath = None
# get the current checkplotdict
if ((isinstance(currentcp, str) or isinstance(currentcp, unicode)) and
os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp,dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
# get the updated checkplotdict
if ((isinstance(updatedcp, str) or isinstance(updatedcp, unicode)) and
os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# do the update using python's dict update mechanism
# this requires updated to be in the same checkplotdict format as current
# all keys in current will now be from updated
cp_current.update(cp_updated)
# figure out the plotfpath if we haven't by now
if not plotfpath and outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % cp_current['objectid']
elif (not plotfpath) and (not outgzip):
plotfpath = 'checkplot-%s.pkl' % cp_current['objectid']
# make sure we write the correct postfix
if plotfpath.endswith('.gz'):
outgzip = True
# write the new checkplotdict
return _write_checkplot_picklefile(cp_current,
outfile=plotfpath,
outgzip=outgzip,
protocol=pickleprotocol) |
def is_game_over(self):
'''
Checks if the game is over due to checkmate, stalemate or
fourfold repetition.
'''
# Stalemate or checkmate.
try:
next(self.generate_legal_moves().__iter__())
except StopIteration:
return True
# Fourfold repetition.
if self.is_fourfold_repetition():
return True
return False | Checks if the game is over due to checkmate, stalemate or
fourfold repetition. | Below is the instruction that describes the task:
### Input:
Checks if the game is over due to checkmate, stalemate or
fourfold repetition.
### Response:
def is_game_over(self):
'''
Checks if the game is over due to checkmate, stalemate or
fourfold repetition.
'''
# Stalemate or checkmate.
try:
next(self.generate_legal_moves().__iter__())
except StopIteration:
return True
# Fourfold repetition.
if self.is_fourfold_repetition():
return True
return False |
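The stalemate/checkmate test above is really an "is this generator empty?" check; the same pattern in isolation, including the next(iterator, sentinel) variant that avoids the try/except:
def legal_moves(n):
    # stand-in generator: yields nothing when n == 0
    for i in range(n):
        yield i
def has_any(iterable):
    try:
        next(iter(iterable))
    except StopIteration:
        return False
    return True
print(has_any(legal_moves(0)))                   # False -> game-over branch
print(next(iter(legal_moves(0)), None) is None)  # True, sentinel variant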
def _sign_url(self, base_url=None, params=None, client_secret=None):
""" Sign a request URL with a Crypto Key.
Usage:
from urlsigner import sign_url
signed_url = sign_url(base_url=my_url,
params=url_params,
client_secret=CLIENT_SECRET)
Args:
base_url - The trunk of the URL to sign. E.g. https://maps.googleapis.com/maps/api/geocode/json
params - List of tuples of URL parameters INCLUDING YOUR CLIENT ID ('client','gme-...')
client_secret - Your Crypto Key from Google for Work
Returns:
The signature as a dictionary #signed request URL
"""
import hashlib
import hmac
import base64
if six.PY3:
from urllib.parse import urlparse, urlencode
else:
from urllib import urlencode
from urlparse import urlparse
# Return if any parameters aren't given
if not base_url or not self.client_secret or not self.client:
return None
# assuming parameters will be submitted to Requests in identical order!
url = urlparse(base_url + "?" + urlencode(params))
# We only need to sign the path+query part of the string
url_to_sign = (url.path + "?" + url.query).encode('utf-8')
# Decode the private key into its binary format
# We need to decode the URL-encoded private key
decoded_key = base64.urlsafe_b64decode(client_secret)
# Create a signature using the private key and the URL-encoded
# string using HMAC SHA1. This signature will be binary.
signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
# Encode the binary signature into base64 for use within a URL
encoded_signature = base64.urlsafe_b64encode(signature.digest())
# Return signature (to be appended as a 'signature' in params)
return encoded_signature | Sign a request URL with a Crypto Key.
Usage:
from urlsigner import sign_url
signed_url = sign_url(base_url=my_url,
params=url_params,
client_secret=CLIENT_SECRET)
Args:
base_url - The trunk of the URL to sign. E.g. https://maps.googleapis.com/maps/api/geocode/json
params - List of tuples of URL parameters INCLUDING YOUR CLIENT ID ('client','gme-...')
client_secret - Your Crypto Key from Google for Work
Returns:
The signature as a dictionary #signed request URL | Below is the instruction that describes the task:
### Input:
Sign a request URL with a Crypto Key.
Usage:
from urlsigner import sign_url
signed_url = sign_url(base_url=my_url,
params=url_params,
client_secret=CLIENT_SECRET)
Args:
base_url - The trunk of the URL to sign. E.g. https://maps.googleapis.com/maps/api/geocode/json
params - List of tuples of URL parameters INCLUDING YOUR CLIENT ID ('client','gme-...')
client_secret - Your Crypto Key from Google for Work
Returns:
The signature as a dictionary #signed request URL
### Response:
def _sign_url(self, base_url=None, params=None, client_secret=None):
""" Sign a request URL with a Crypto Key.
Usage:
from urlsigner import sign_url
signed_url = sign_url(base_url=my_url,
params=url_params,
client_secret=CLIENT_SECRET)
Args:
base_url - The trunk of the URL to sign. E.g. https://maps.googleapis.com/maps/api/geocode/json
params - List of tuples of URL parameters INCLUDING YOUR CLIENT ID ('client','gme-...')
client_secret - Your Crypto Key from Google for Work
Returns:
The signature as a dictionary #signed request URL
"""
import hashlib
import hmac
import base64
if six.PY3:
from urllib.parse import urlparse, urlencode
else:
from urllib import urlencode
from urlparse import urlparse
# Return if any parameters aren't given
if not base_url or not self.client_secret or not self.client:
return None
# assuming parameters will be submitted to Requests in identical order!
url = urlparse(base_url + "?" + urlencode(params))
# We only need to sign the path+query part of the string
url_to_sign = (url.path + "?" + url.query).encode('utf-8')
# Decode the private key into its binary format
# We need to decode the URL-encoded private key
decoded_key = base64.urlsafe_b64decode(client_secret)
# Create a signature using the private key and the URL-encoded
# string using HMAC SHA1. This signature will be binary.
signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
# Encode the binary signature into base64 for use within a URL
encoded_signature = base64.urlsafe_b64encode(signature.digest())
# Return signature (to be appended as a 'signature' in params)
return encoded_signature |
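The signing core can be reproduced with only the standard library: HMAC-SHA1 over the path plus query, keyed with the url-safe-base64-decoded secret, then url-safe-base64-encoded again. The secret and parameters below are made up, and the Python 3 urllib imports are used for brevity:
import base64
import hashlib
import hmac
from urllib.parse import urlencode, urlparse
base_url = "https://maps.googleapis.com/maps/api/geocode/json"
params = [("address", "1600 Amphitheatre Parkway"), ("client", "gme-example")]
client_secret = base64.urlsafe_b64encode(b"not-a-real-secret").decode()
url = urlparse(base_url + "?" + urlencode(params))
url_to_sign = (url.path + "?" + url.query).encode("utf-8")
key = base64.urlsafe_b64decode(client_secret)
digest = hmac.new(key, url_to_sign, hashlib.sha1).digest()
signature = base64.urlsafe_b64encode(digest).decode()
signed_url = base_url + "?" + urlencode(params + [("signature", signature)])
print(signed_url)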
def get_user_uk(cookie, tokens):
'''Get the user's uk'''
url = 'http://yun.baidu.com'
req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
if req:
content = req.data.decode()
match = re.findall('/share/home\?uk=(\d+)" target=', content)
if len(match) == 1:
return match[0]
else:
logger.warn('pcs.get_user_uk(), failed to parse uk, %s' % url)
return None | 获取用户的uk | Below is the the instruction that describes the task:
### Input:
Get the user's uk
### Response:
def get_user_uk(cookie, tokens):
'''Get the user's uk'''
url = 'http://yun.baidu.com'
req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
if req:
content = req.data.decode()
match = re.findall('/share/home\?uk=(\d+)" target=', content)
if len(match) == 1:
return match[0]
else:
logger.warn('pcs.get_user_uk(), failed to parse uk, %s' % url)
return None |
def align(time, time2, magnitude, magnitude2, error, error2):
"""Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2
"""
error = np.zeros(time.shape) if error is None else error
error2 = np.zeros(time2.shape) if error2 is None else error2
# this assumes that the first series is the shorter one
sserie = pd.DataFrame({"mag": magnitude, "error": error}, index=time)
lserie = pd.DataFrame({"mag": magnitude2, "error": error2}, index=time2)
# if the second series is the longer one then swap
if len(time) > len(time2):
sserie, lserie = lserie, sserie
# make the merge
merged = sserie.join(lserie, how="inner", rsuffix='2')
# recreate columns
new_time = merged.index.values
new_mag, new_mag2 = merged.mag.values, merged.mag2.values
new_error, new_error2 = merged.error.values, merged.error2.values
if len(time) > len(time2):
new_mag, new_mag2 = new_mag2, new_mag
new_error, new_error2 = new_error2, new_error
return new_time, new_mag, new_mag2, new_error, new_error2 | Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2 | Below is the instruction that describes the task:
### Input:
Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2
### Response:
def align(time, time2, magnitude, magnitude2, error, error2):
"""Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2
"""
error = np.zeros(time.shape) if error is None else error
error2 = np.zeros(time2.shape) if error2 is None else error2
# this assumes that the first series is the shorter one
sserie = pd.DataFrame({"mag": magnitude, "error": error}, index=time)
lserie = pd.DataFrame({"mag": magnitude2, "error": error2}, index=time2)
# if the second series is the longer one then swap
if len(time) > len(time2):
sserie, lserie = lserie, sserie
# make the merge
merged = sserie.join(lserie, how="inner", rsuffix='2')
# recreate columns
new_time = merged.index.values
new_mag, new_mag2 = merged.mag.values, merged.mag2.values
new_error, new_error2 = merged.error.values, merged.error2.values
if len(time) > len(time2):
new_mag, new_mag2 = new_mag2, new_mag
new_error, new_error2 = new_error2, new_error
return new_time, new_mag, new_mag2, new_error, new_error2 |
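Example of calling the align function above on two made-up, partially overlapping time grids (errors omitted, so they default to zeros); only the epochs present in both bands survive the inner join:
import numpy as np
import pandas as pd
time = np.array([1.0, 2.0, 3.0, 4.0])
magnitude = np.array([10.1, 10.2, 10.3, 10.4])
time2 = np.array([2.0, 3.0, 5.0])
magnitude2 = np.array([11.2, 11.3, 11.5])
new_time, new_mag, new_mag2, new_err, new_err2 = align(
    time, time2, magnitude, magnitude2, None, None)
print(new_time)  # [2. 3.]
print(new_mag)   # [10.2 10.3]
print(new_mag2)  # [11.2 11.3]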
def _build(self, input_, prev_state):
"""Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
input_: a 2D Tensor of size [batch_size, input_size].
prev_state: a 2D Tensor of size [batch_size, hidden_size].
Returns:
output: a 2D Tensor of size [batch_size, hidden_size].
next_state: a Tensor of size [batch_size, hidden_size].
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
self._in_to_hidden_linear = basic.Linear(
self._hidden_size, name="in_to_hidden",
initializers=self._initializers.get("in_to_hidden"),
partitioners=self._partitioners.get("in_to_hidden"),
regularizers=self._regularizers.get("in_to_hidden"))
self._hidden_to_hidden_linear = basic.Linear(
self._hidden_size, name="hidden_to_hidden",
initializers=self._initializers.get("hidden_to_hidden"),
partitioners=self._partitioners.get("hidden_to_hidden"),
regularizers=self._regularizers.get("hidden_to_hidden"))
in_to_hidden = self._in_to_hidden_linear(input_)
hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)
output = self._activation(in_to_hidden + hidden_to_hidden)
# For VanillaRNN, the next state of the RNN is the same as the output
return output, output | Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
input_: a 2D Tensor of size [batch_size, input_size].
prev_state: a 2D Tensor of size [batch_size, hidden_size].
Returns:
output: a 2D Tensor of size [batch_size, hidden_size].
next_state: a Tensor of size [batch_size, hidden_size].
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations. | Below is the instruction that describes the task:
### Input:
Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
input_: a 2D Tensor of size [batch_size, input_size].
prev_state: a 2D Tensor of size [batch_size, hidden_size].
Returns:
output: a 2D Tensor of size [batch_size, hidden_size].
next_state: a Tensor of size [batch_size, hidden_size].
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
### Response:
def _build(self, input_, prev_state):
"""Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
input_: a 2D Tensor of size [batch_size, input_size].
prev_state: a 2D Tensor of size [batch_size, hidden_size].
Returns:
output: a 2D Tensor of size [batch_size, hidden_size].
next_state: a Tensor of size [batch_size, hidden_size].
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
self._in_to_hidden_linear = basic.Linear(
self._hidden_size, name="in_to_hidden",
initializers=self._initializers.get("in_to_hidden"),
partitioners=self._partitioners.get("in_to_hidden"),
regularizers=self._regularizers.get("in_to_hidden"))
self._hidden_to_hidden_linear = basic.Linear(
self._hidden_size, name="hidden_to_hidden",
initializers=self._initializers.get("hidden_to_hidden"),
partitioners=self._partitioners.get("hidden_to_hidden"),
regularizers=self._regularizers.get("hidden_to_hidden"))
in_to_hidden = self._in_to_hidden_linear(input_)
hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)
output = self._activation(in_to_hidden + hidden_to_hidden)
# For VanillaRNN, the next state of the RNN is the same as the output
return output, output |
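The same update written out in plain NumPy, output = next_state = tanh(x W_in + h W_hh), assuming the activation is tanh; the bias terms that the Linear modules add by default are left out of this sketch, and the shapes are arbitrary:
import numpy as np
rng = np.random.default_rng(0)
batch_size, input_size, hidden_size = 2, 3, 4
x = rng.standard_normal((batch_size, input_size))
h = np.zeros((batch_size, hidden_size))
W_in = rng.standard_normal((input_size, hidden_size))
W_hh = rng.standard_normal((hidden_size, hidden_size))
output = np.tanh(x @ W_in + h @ W_hh)
next_state = output  # for a vanilla RNN the output is the next state
print(output.shape)  # (2, 4)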
def generate_dict_schema(size, valid):
""" Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns
"""
schema = {}
generator_items = []
# Generate schema
for i in range(0, size):
while True:
key_schema, key_generator = generate_random_schema(valid)
if key_schema not in schema:
break
value_schema, value_generator = generate_random_schema(valid)
schema[key_schema] = value_schema
generator_items.append((key_generator, value_generator))
# Samples
generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count())
# Finish
return schema, generator | Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns | Below is the instruction that describes the task:
### Input:
Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns
### Response:
def generate_dict_schema(size, valid):
""" Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns
"""
schema = {}
generator_items = []
# Generate schema
for i in range(0, size):
while True:
key_schema, key_generator = generate_random_schema(valid)
if key_schema not in schema:
break
value_schema, value_generator = generate_random_schema(valid)
schema[key_schema] = value_schema
generator_items.append((key_generator, value_generator))
# Samples
generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count())
# Finish
return schema, generator |
def create_free_shipping_promotion(cls, free_shipping_promotion, **kwargs):
"""Create FreeShippingPromotion
Create a new FreeShippingPromotion
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_free_shipping_promotion(free_shipping_promotion, async=True)
>>> result = thread.get()
:param async bool
:param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to create (required)
:return: FreeShippingPromotion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_free_shipping_promotion_with_http_info(free_shipping_promotion, **kwargs)
else:
(data) = cls._create_free_shipping_promotion_with_http_info(free_shipping_promotion, **kwargs)
return data | Create FreeShippingPromotion
Create a new FreeShippingPromotion
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_free_shipping_promotion(free_shipping_promotion, async=True)
>>> result = thread.get()
:param async bool
:param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to create (required)
:return: FreeShippingPromotion
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Create FreeShippingPromotion
Create a new FreeShippingPromotion
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_free_shipping_promotion(free_shipping_promotion, async=True)
>>> result = thread.get()
:param async bool
:param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to create (required)
:return: FreeShippingPromotion
If the method is called asynchronously,
returns the request thread.
### Response:
def create_free_shipping_promotion(cls, free_shipping_promotion, **kwargs):
"""Create FreeShippingPromotion
Create a new FreeShippingPromotion
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_free_shipping_promotion(free_shipping_promotion, async=True)
>>> result = thread.get()
:param async bool
:param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to create (required)
:return: FreeShippingPromotion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_free_shipping_promotion_with_http_info(free_shipping_promotion, **kwargs)
else:
(data) = cls._create_free_shipping_promotion_with_http_info(free_shipping_promotion, **kwargs)
return data |
def process_event(self, event_name: str, data: dict) -> None:
"""
Process event after epoch
Args:
event_name: whether the event is sent after an epoch or a batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None
"""
if event_name == "after_epoch":
self.epochs_done = data["epochs_done"]
self.batches_seen = data["batches_seen"]
self.train_examples_seen = data["train_examples_seen"]
return | Process event after epoch
Args:
event_name: whether the event is sent after an epoch or a batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None | Below is the instruction that describes the task:
### Input:
Process event after epoch
Args:
event_name: whether the event is sent after an epoch or a batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None
### Response:
def process_event(self, event_name: str, data: dict) -> None:
"""
Process event after epoch
Args:
event_name: whether the event is sent after an epoch or a batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None
"""
if event_name == "after_epoch":
self.epochs_done = data["epochs_done"]
self.batches_seen = data["batches_seen"]
self.train_examples_seen = data["train_examples_seen"]
return |
def cmd_land(self, args):
'''auto land commands'''
if len(args) < 1:
self.master.mav.command_long_send(self.settings.target_system,
0,
mavutil.mavlink.MAV_CMD_DO_LAND_START,
0, 0, 0, 0, 0, 0, 0, 0)
elif args[0] == 'abort':
self.master.mav.command_long_send(self.settings.target_system,
0,
mavutil.mavlink.MAV_CMD_DO_GO_AROUND,
0, 0, 0, 0, 0, 0, 0, 0)
else:
print("Usage: land [abort]") | auto land commands | Below is the the instruction that describes the task:
### Input:
auto land commands
### Response:
def cmd_land(self, args):
'''auto land commands'''
if len(args) < 1:
self.master.mav.command_long_send(self.settings.target_system,
0,
mavutil.mavlink.MAV_CMD_DO_LAND_START,
0, 0, 0, 0, 0, 0, 0, 0)
elif args[0] == 'abort':
self.master.mav.command_long_send(self.settings.target_system,
0,
mavutil.mavlink.MAV_CMD_DO_GO_AROUND,
0, 0, 0, 0, 0, 0, 0, 0)
else:
print("Usage: land [abort]") |
def get_type_name(self):
"""
Returns the type name of the PKCS7 structure
:return: A string with the typename
"""
nid = _lib.OBJ_obj2nid(self._pkcs7.type)
string_type = _lib.OBJ_nid2sn(nid)
return _ffi.string(string_type) | Returns the type name of the PKCS7 structure
:return: A string with the typename | Below is the instruction that describes the task:
### Input:
Returns the type name of the PKCS7 structure
:return: A string with the typename
### Response:
def get_type_name(self):
"""
Returns the type name of the PKCS7 structure
:return: A string with the typename
"""
nid = _lib.OBJ_obj2nid(self._pkcs7.type)
string_type = _lib.OBJ_nid2sn(nid)
return _ffi.string(string_type) |
def apply_instance_data(designspace, include_filenames=None, Font=defcon.Font):
"""Open UFO instances referenced by designspace, apply Glyphs instance
data if present, re-save UFOs and return updated UFO Font objects.
Args:
designspace: DesignSpaceDocument object or path (str or PathLike) to
a designspace file.
include_filenames: optional set of instance filenames (relative to
the designspace path) to be included. By default all instances are
processed.
Font: the class used to load the UFO (default: defcon.Font).
Returns:
List of opened and updated instance UFOs.
"""
from fontTools.designspaceLib import DesignSpaceDocument
from os.path import normcase, normpath
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = DesignSpaceDocument.fromfile(designspace)
basedir = os.path.dirname(designspace.path)
instance_ufos = []
if include_filenames is not None:
include_filenames = {normcase(normpath(p)) for p in include_filenames}
for designspace_instance in designspace.instances:
fname = designspace_instance.filename
assert fname is not None, "instance %r missing required filename" % getattr(
designspace_instance, "name", designspace_instance
)
if include_filenames is not None:
fname = normcase(normpath(fname))
if fname not in include_filenames:
continue
logger.debug("Applying instance data to %s", fname)
# fontmake <= 1.4.0 compares the ufo paths returned from this function
# to the keys of a dict of designspace locations that have been passed
# through normpath (but not normcase). We do the same.
ufo = Font(normpath(os.path.join(basedir, fname)))
set_weight_class(ufo, designspace, designspace_instance)
set_width_class(ufo, designspace, designspace_instance)
glyphs_instance = InstanceDescriptorAsGSInstance(designspace_instance)
to_ufo_custom_params(None, ufo, glyphs_instance)
ufo.save()
instance_ufos.append(ufo)
return instance_ufos | Open UFO instances referenced by designspace, apply Glyphs instance
data if present, re-save UFOs and return updated UFO Font objects.
Args:
designspace: DesignSpaceDocument object or path (str or PathLike) to
a designspace file.
include_filenames: optional set of instance filenames (relative to
the designspace path) to be included. By default all instances are
processed.
Font: the class used to load the UFO (default: defcon.Font).
Returns:
List of opened and updated instance UFOs. | Below is the instruction that describes the task:
### Input:
Open UFO instances referenced by designspace, apply Glyphs instance
data if present, re-save UFOs and return updated UFO Font objects.
Args:
designspace: DesignSpaceDocument object or path (str or PathLike) to
a designspace file.
include_filenames: optional set of instance filenames (relative to
the designspace path) to be included. By default all instances are
processed.
Font: the class used to load the UFO (default: defcon.Font).
Returns:
List of opened and updated instance UFOs.
### Response:
def apply_instance_data(designspace, include_filenames=None, Font=defcon.Font):
"""Open UFO instances referenced by designspace, apply Glyphs instance
data if present, re-save UFOs and return updated UFO Font objects.
Args:
designspace: DesignSpaceDocument object or path (str or PathLike) to
a designspace file.
include_filenames: optional set of instance filenames (relative to
the designspace path) to be included. By default all instances are
processed.
Font: the class used to load the UFO (default: defcon.Font).
Returns:
List of opened and updated instance UFOs.
"""
from fontTools.designspaceLib import DesignSpaceDocument
from os.path import normcase, normpath
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = DesignSpaceDocument.fromfile(designspace)
basedir = os.path.dirname(designspace.path)
instance_ufos = []
if include_filenames is not None:
include_filenames = {normcase(normpath(p)) for p in include_filenames}
for designspace_instance in designspace.instances:
fname = designspace_instance.filename
assert fname is not None, "instance %r missing required filename" % getattr(
designspace_instance, "name", designspace_instance
)
if include_filenames is not None:
fname = normcase(normpath(fname))
if fname not in include_filenames:
continue
logger.debug("Applying instance data to %s", fname)
# fontmake <= 1.4.0 compares the ufo paths returned from this function
# to the keys of a dict of designspace locations that have been passed
# through normpath (but not normcase). We do the same.
ufo = Font(normpath(os.path.join(basedir, fname)))
set_weight_class(ufo, designspace, designspace_instance)
set_width_class(ufo, designspace, designspace_instance)
glyphs_instance = InstanceDescriptorAsGSInstance(designspace_instance)
to_ufo_custom_params(None, ufo, glyphs_instance)
ufo.save()
instance_ufos.append(ufo)
return instance_ufos |
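An illustrative call of the function above; the designspace path and instance filename are placeholders:
from fontTools.designspaceLib import DesignSpaceDocument
doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
ufos = apply_instance_data(doc, include_filenames={"instance_ufos/MyFamily-Bold.ufo"})
for ufo in ufos:
    print(ufo.path)   # each returned UFO has been re-saved with the Glyphs data applied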
def conference(self, id, **options):
"""
This object allows multiple lines in separate sessions to be conferenced together so that the parties on each line can talk to each other simultaneously.
This is a voice channel only feature.
Argument: "id" is a String
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/conference
"""
self._steps.append(Conference(id, **options).obj) | This object allows multiple lines in separate sessions to be conferenced together so that the parties on each line can talk to each other simultaneously.
This is a voice channel only feature.
Argument: "id" is a String
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/conference | Below is the instruction that describes the task:
### Input:
This object allows multiple lines in separate sessions to be conferenced together so that the parties on each line can talk to each other simultaneously.
This is a voice channel only feature.
Argument: "id" is a String
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/conference
### Response:
def conference(self, id, **options):
"""
This object allows multiple lines in separate sessions to be conferenced together so that the parties on each line can talk to each other simultaneously.
This is a voice channel only feature.
Argument: "id" is a String
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/conference
"""
self._steps.append(Conference(id, **options).obj) |
def characteristics_resolved(self):
"""
Called when all service's characteristics got resolved.
"""
self._disconnect_characteristic_signals()
characteristics_regex = re.compile(self._path + '/char[0-9abcdef]{4}$')
managed_characteristics = [
char for char in self._object_manager.GetManagedObjects().items()
if characteristics_regex.match(char[0])]
self.characteristics = [Characteristic(
service=self,
path=c[0],
uuid=c[1]['org.bluez.GattCharacteristic1']['UUID']) for c in managed_characteristics]
self._connect_characteristic_signals() | Called when all service's characteristics got resolved. | Below is the instruction that describes the task:
### Input:
Called when all service's characteristics got resolved.
### Response:
def characteristics_resolved(self):
"""
Called when all service's characteristics got resolved.
"""
self._disconnect_characteristic_signals()
characteristics_regex = re.compile(self._path + '/char[0-9abcdef]{4}$')
managed_characteristics = [
char for char in self._object_manager.GetManagedObjects().items()
if characteristics_regex.match(char[0])]
self.characteristics = [Characteristic(
service=self,
path=c[0],
uuid=c[1]['org.bluez.GattCharacteristic1']['UUID']) for c in managed_characteristics]
self._connect_characteristic_signals() |
def update(self, table, columns, values):
"""Update one or more existing table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
"""
self._mutations.append(Mutation(update=_make_write_pb(table, columns, values))) | Update one or more existing table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified. | Below is the instruction that describes the task:
### Input:
Update one or more existing table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
### Response:
def update(self, table, columns, values):
"""Update one or more existing table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
"""
self._mutations.append(Mutation(update=_make_write_pb(table, columns, values))) |
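A hedged sketch of the usual calling pattern with google-cloud-spanner; the table and column names are invented for illustration:
with database.batch() as batch:   # `database` comes from a spanner Client elsewhere
    batch.update(
        table="albums",
        columns=("album_id", "title"),
        values=[(1, "Total Junk"), (2, "Go, Go, Go")],
    )
# leaving the with-block commits the queued mutations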
def nvmlDeviceGetPowerUsage(handle):
r"""
/**
* Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
*
* For Fermi &tm; or newer fully supported devices.
*
* On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw.
*
* It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode.
*
* @param device The identifier of the target device
* @param power Reference in which to return the power usage information
*
* @return
* - \ref NVML_SUCCESS if \a power has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage
"""
c_watts = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerUsage")
ret = fn(handle, byref(c_watts))
_nvmlCheckReturn(ret)
return bytes_to_str(c_watts.value) | r"""
/**
* Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
*
* For Fermi &tm; or newer fully supported devices.
*
* On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw.
*
* It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode.
*
* @param device The identifier of the target device
* @param power Reference in which to return the power usage information
*
* @return
* - \ref NVML_SUCCESS if \a power has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage | Below is the instruction that describes the task:
### Input:
r"""
/**
* Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
*
* For Fermi &tm; or newer fully supported devices.
*
* On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw.
*
* It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode.
*
* @param device The identifier of the target device
* @param power Reference in which to return the power usage information
*
* @return
* - \ref NVML_SUCCESS if \a power has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage
### Response:
def nvmlDeviceGetPowerUsage(handle):
r"""
/**
* Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
*
* For Fermi &tm; or newer fully supported devices.
*
* On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw.
*
* It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode.
*
* @param device The identifier of the target device
* @param power Reference in which to return the power usage information
*
* @return
* - \ref NVML_SUCCESS if \a power has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage
"""
c_watts = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerUsage")
ret = fn(handle, byref(c_watts))
_nvmlCheckReturn(ret)
return bytes_to_str(c_watts.value) |
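A short usage sketch with the same NVML bindings, assuming at least one NVIDIA GPU is visible:
nvmlInit()
handle = nvmlDeviceGetHandleByIndex(0)
print("GPU0 draws roughly %s mW" % nvmlDeviceGetPowerUsage(handle))
nvmlShutdown()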
def meta_changed_notify_after(self, state_machine_m, _, info):
"""Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value
"""
meta_signal_message = info['arg']
if meta_signal_message.origin == "graphical_editor_gaphas": # Ignore changes caused by ourself
return
if meta_signal_message.origin == "load_meta_data": # Meta data can't be applied, as the view has not yet
return # been created
notification = meta_signal_message.notification
if not notification: # For changes applied to the root state, there are always two notifications
return # Ignore the one with less information
if self.model.ongoing_complex_actions:
return
model = notification.model
view = self.canvas.get_view_for_model(model)
if meta_signal_message.change == 'show_content':
library_state_m = model
library_state_v = view
if library_state_m.meta['gui']['show_content'] is not library_state_m.show_content():
logger.warning("The content of the LibraryState won't be shown, because "
"MAX_VISIBLE_LIBRARY_HIERARCHY is 1.")
if library_state_m.show_content():
if not library_state_m.state_copy_initialized:
logger.warning("Show library content without initialized state copy does not work {0}"
"".format(library_state_m))
logger.debug("Show content of {}".format(library_state_m.state))
gui_helper_meta_data.scale_library_content(library_state_m)
self.add_state_view_for_model(library_state_m.state_copy, view,
hierarchy_level=library_state_v.hierarchy_level + 1)
else:
logger.debug("Hide content of {}".format(library_state_m.state))
state_copy_v = self.canvas.get_view_for_model(library_state_m.state_copy)
if state_copy_v:
state_copy_v.remove()
else:
if isinstance(view, StateView):
view.apply_meta_data(recursive=meta_signal_message.affects_children)
else:
view.apply_meta_data()
self.canvas.request_update(view, matrix=True)
self.canvas.wait_for_update() | Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value | Below is the instruction that describes the task:
### Input:
Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value
### Response:
def meta_changed_notify_after(self, state_machine_m, _, info):
"""Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value
"""
meta_signal_message = info['arg']
if meta_signal_message.origin == "graphical_editor_gaphas": # Ignore changes caused by ourself
return
if meta_signal_message.origin == "load_meta_data": # Meta data can't be applied, as the view has not yet
return # been created
notification = meta_signal_message.notification
if not notification: # For changes applied to the root state, there are always two notifications
return # Ignore the one with less information
if self.model.ongoing_complex_actions:
return
model = notification.model
view = self.canvas.get_view_for_model(model)
if meta_signal_message.change == 'show_content':
library_state_m = model
library_state_v = view
if library_state_m.meta['gui']['show_content'] is not library_state_m.show_content():
logger.warning("The content of the LibraryState won't be shown, because "
"MAX_VISIBLE_LIBRARY_HIERARCHY is 1.")
if library_state_m.show_content():
if not library_state_m.state_copy_initialized:
logger.warning("Show library content without initialized state copy does not work {0}"
"".format(library_state_m))
logger.debug("Show content of {}".format(library_state_m.state))
gui_helper_meta_data.scale_library_content(library_state_m)
self.add_state_view_for_model(library_state_m.state_copy, view,
hierarchy_level=library_state_v.hierarchy_level + 1)
else:
logger.debug("Hide content of {}".format(library_state_m.state))
state_copy_v = self.canvas.get_view_for_model(library_state_m.state_copy)
if state_copy_v:
state_copy_v.remove()
else:
if isinstance(view, StateView):
view.apply_meta_data(recursive=meta_signal_message.affects_children)
else:
view.apply_meta_data()
self.canvas.request_update(view, matrix=True)
self.canvas.wait_for_update() |
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret | Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01 | Below is the instruction that describes the task:
### Input:
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
### Response:
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret |
def is_reference_target(resource, rtype, label):
""" Return true if the resource has this rtype with this label """
prop = resource.props.references.get(rtype, False)
if prop:
return label in prop | Return true if the resource has this rtype with this label | Below is the the instruction that describes the task:
### Input:
Return true if the resource has this rtype with this label
### Response:
def is_reference_target(resource, rtype, label):
""" Return true if the resource has this rtype with this label """
prop = resource.props.references.get(rtype, False)
if prop:
return label in prop |
def index_row(self, dataframe):
"""
Indexes the row based on the request parameters.
"""
return dataframe.loc[self.kwargs[self.lookup_url_kwarg]].to_frame().T | Indexes the row based on the request parameters. | Below is the the instruction that describes the task:
### Input:
Indexes the row based on the request parameters.
### Response:
def index_row(self, dataframe):
"""
Indexes the row based on the request parameters.
"""
return dataframe.loc[self.kwargs[self.lookup_url_kwarg]].to_frame().T |
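A tiny pandas demonstration of the ``.loc[...].to_frame().T`` idiom used above: selecting a single label with ``.loc`` yields a Series, and transposing its frame restores a one-row DataFrame:
import pandas as pd
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
row = df.loc["x"]            # Series indexed by the column names
one_row = row.to_frame().T   # 1x2 DataFrame whose index is ['x']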
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name) | Connect to all repos and download metadata | Below is the the instruction that describes the task:
### Input:
Connect to all repos and download metadata
### Response:
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name) |
def cp_als(X, rank, random_state=None, init='randn', **options):
"""Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
"""
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'CP_ALS', **options)
# Main optimization loop.
while result.still_optimizing:
# Iterate over each tensor mode.
for n in range(X.ndim):
# i) Normalize factors to prevent singularities.
U.rebalance()
# ii) Compute the N-1 gram matrices.
components = [U[j] for j in range(X.ndim) if j != n]
grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components])
# iii) Compute Khatri-Rao product.
kr = khatri_rao(components)
# iv) Form normal equations and solve via Cholesky
c = linalg.cho_factor(grams, overwrite_a=False)
p = unfold(X, n).dot(kr)
U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T
# U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[-1].T.dot(U[-1])
# obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
obj = linalg.norm(U.full() - X) / normX
# Update result
result.update(obj)
# Finalize and return the optimization result.
return result.finalize() | Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
``` | Below is the instruction that describes the task:
### Input:
Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
### Response:
def cp_als(X, rank, random_state=None, init='randn', **options):
"""Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
"""
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'CP_ALS', **options)
# Main optimization loop.
while result.still_optimizing:
# Iterate over each tensor mode.
for n in range(X.ndim):
# i) Normalize factors to prevent singularities.
U.rebalance()
# ii) Compute the N-1 gram matrices.
components = [U[j] for j in range(X.ndim) if j != n]
grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components])
# iii) Compute Khatri-Rao product.
kr = khatri_rao(components)
# iv) Form normal equations and solve via Cholesky
c = linalg.cho_factor(grams, overwrite_a=False)
p = unfold(X, n).dot(kr)
U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T
# U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[-1].T.dot(U[-1])
# obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
obj = linalg.norm(U.full() - X) / normX
# Update result
result.update(obj)
# Finalize and return the optimization result.
return result.finalize() |
def append_dynamic(self, t, dynamic, canvas=0, color='blue'):
"""!
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that correspond to the dynamic values and are considered on an X axis.
@param[in] dynamic (list): Value points of the dynamic that are considered on a Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas.
"""
description = dynamic_descr(canvas, t, dynamic, False, color);
self.__dynamic_storage.append(description);
self.__update_canvas_xlim(description.time, description.separate); | !
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that correspond to the dynamic values and are considered on an X axis.
@param[in] dynamic (list): Value points of the dynamic that are considered on a Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas. | Below is the instruction that describes the task:
### Input:
!
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that correspond to the dynamic values and are considered on an X axis.
@param[in] dynamic (list): Value points of the dynamic that are considered on a Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas.
### Response:
def append_dynamic(self, t, dynamic, canvas=0, color='blue'):
"""!
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that correspond to the dynamic values and are considered on an X axis.
@param[in] dynamic (list): Value points of the dynamic that are considered on a Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas.
"""
description = dynamic_descr(canvas, t, dynamic, False, color);
self.__dynamic_storage.append(description);
self.__update_canvas_xlim(description.time, description.separate); |
def diagnostic_send(self, diagFl1, diagFl2, diagFl3, diagSh1, diagSh2, diagSh3, force_mavlink1=False):
'''
Configurable diagnostic messages.
diagFl1 : Diagnostic float 1 (float)
diagFl2 : Diagnostic float 2 (float)
diagFl3 : Diagnostic float 3 (float)
diagSh1 : Diagnostic short 1 (int16_t)
diagSh2 : Diagnostic short 2 (int16_t)
diagSh3 : Diagnostic short 3 (int16_t)
'''
return self.send(self.diagnostic_encode(diagFl1, diagFl2, diagFl3, diagSh1, diagSh2, diagSh3), force_mavlink1=force_mavlink1) | Configurable diagnostic messages.
diagFl1 : Diagnostic float 1 (float)
diagFl2 : Diagnostic float 2 (float)
diagFl3 : Diagnostic float 3 (float)
diagSh1 : Diagnostic short 1 (int16_t)
diagSh2 : Diagnostic short 2 (int16_t)
diagSh3 : Diagnostic short 3 (int16_t) | Below is the instruction that describes the task:
### Input:
Configurable diagnostic messages.
diagFl1 : Diagnostic float 1 (float)
diagFl2 : Diagnostic float 2 (float)
diagFl3 : Diagnostic float 3 (float)
diagSh1 : Diagnostic short 1 (int16_t)
diagSh2 : Diagnostic short 2 (int16_t)
diagSh3 : Diagnostic short 3 (int16_t)
### Response:
def diagnostic_send(self, diagFl1, diagFl2, diagFl3, diagSh1, diagSh2, diagSh3, force_mavlink1=False):
'''
Configurable diagnostic messages.
diagFl1 : Diagnostic float 1 (float)
diagFl2 : Diagnostic float 2 (float)
diagFl3 : Diagnostic float 3 (float)
diagSh1 : Diagnostic short 1 (int16_t)
diagSh2 : Diagnostic short 2 (int16_t)
diagSh3 : Diagnostic short 3 (int16_t)
'''
return self.send(self.diagnostic_encode(diagFl1, diagFl2, diagFl3, diagSh1, diagSh2, diagSh3), force_mavlink1=force_mavlink1) |
def getpreferredencoding():
"""Return preferred encoding for text I/O."""
encoding = locale.getpreferredencoding(False)
if sys.platform == 'darwin' and encoding.startswith('mac-'):
# Upgrade ancient MacOS encodings in Python < 2.7
encoding = 'utf-8'
return encoding | Return preferred encoding for text I/O. | Below is the the instruction that describes the task:
### Input:
Return preferred encoding for text I/O.
### Response:
def getpreferredencoding():
"""Return preferred encoding for text I/O."""
encoding = locale.getpreferredencoding(False)
if sys.platform == 'darwin' and encoding.startswith('mac-'):
# Upgrade ancient MacOS encodings in Python < 2.7
encoding = 'utf-8'
return encoding |
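Typical use, sketched with ``io.open`` so it behaves the same on Python 2 and 3 (the filename is illustrative):
import io
with io.open("notes.txt", "w", encoding=getpreferredencoding()) as handle:
    handle.write(u"text written in the user's preferred locale encoding\n")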
def _is_pid_running_on_unix(pid):
"""
Check if PID is running for Unix systems.
"""
try:
os.kill(pid, 0)
except OSError as err:
# if error is ESRCH, it means the process doesn't exist
return not err.errno == os.errno.ESRCH
return True | Check if PID is running for Unix systems. | Below is the the instruction that describes the task:
### Input:
Check if PID is running for Unix systems.
### Response:
def _is_pid_running_on_unix(pid):
"""
Check if PID is running for Unix systems.
"""
try:
os.kill(pid, 0)
except OSError as err:
# if error is ESRCH, it means the process doesn't exist
return not err.errno == os.errno.ESRCH
return True |
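A quick check of the helper; note that recent Python releases no longer expose ``os.errno``, so the except branch above would need the stdlib ``errno`` module (``errno.ESRCH``) there:
import os
print(_is_pid_running_on_unix(os.getpid()))   # True: the current process certainly exists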
def token(self):
"""
Returns authorization token provided by Cocaine.
The real meaning of the token is determined by its type. For example OAUTH2 token will
have "bearer" type.
:return: A tuple of token type and body.
"""
if self._token is None:
token_type = os.getenv(TOKEN_TYPE_KEY, '')
token_body = os.getenv(TOKEN_BODY_KEY, '')
self._token = _Token(token_type, token_body)
return self._token | Returns authorization token provided by Cocaine.
The real meaning of the token is determined by its type. For example OAUTH2 token will
have "bearer" type.
:return: A tuple of token type and body. | Below is the instruction that describes the task:
### Input:
Returns authorization token provided by Cocaine.
The real meaning of the token is determined by its type. For example OAUTH2 token will
have "bearer" type.
:return: A tuple of token type and body.
### Response:
def token(self):
"""
Returns authorization token provided by Cocaine.
The real meaning of the token is determined by its type. For example OAUTH2 token will
have "bearer" type.
:return: A tuple of token type and body.
"""
if self._token is None:
token_type = os.getenv(TOKEN_TYPE_KEY, '')
token_body = os.getenv(TOKEN_BODY_KEY, '')
self._token = _Token(token_type, token_body)
return self._token |
def _mitogen_reset(self, mode):
"""
Forget everything we know about the connected context. This function
cannot be called _reset() since that name is used as a public API by
Ansible 2.4 wait_for_connection plug-in.
:param str mode:
Name of ContextService method to use to discard the context, either
'put' or 'reset'.
"""
if not self.context:
return
self.chain.reset()
self.parent.call_service(
service_name='ansible_mitogen.services.ContextService',
method_name=mode,
context=self.context
)
self.context = None
self.login_context = None
self.init_child_result = None
self.chain = None | Forget everything we know about the connected context. This function
cannot be called _reset() since that name is used as a public API by
Ansible 2.4 wait_for_connection plug-in.
:param str mode:
Name of ContextService method to use to discard the context, either
'put' or 'reset'. | Below is the instruction that describes the task:
### Input:
Forget everything we know about the connected context. This function
cannot be called _reset() since that name is used as a public API by
Ansible 2.4 wait_for_connection plug-in.
:param str mode:
Name of ContextService method to use to discard the context, either
'put' or 'reset'.
### Response:
def _mitogen_reset(self, mode):
"""
Forget everything we know about the connected context. This function
cannot be called _reset() since that name is used as a public API by
Ansible 2.4 wait_for_connection plug-in.
:param str mode:
Name of ContextService method to use to discard the context, either
'put' or 'reset'.
"""
if not self.context:
return
self.chain.reset()
self.parent.call_service(
service_name='ansible_mitogen.services.ContextService',
method_name=mode,
context=self.context
)
self.context = None
self.login_context = None
self.init_child_result = None
self.chain = None |
def createNewOoid(timestamp=None, depth=None):
"""Create a new Ooid for a given time, to be stored at a given depth
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If none, use the defaultDepth
returns a new opaque id string holding 24 random hex digits and encoded date and depth info
"""
if not timestamp:
timestamp = utc_now().date()
if not depth:
depth = defaultDepth
assert depth <= 4 and depth >=1
uuid = str(uu.uuid4())
return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day) | Create a new Ooid for a given time, to be stored at a given depth
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If none, use the defaultDepth
returns a new opaque id string holding 24 random hex digits and encoded date and depth info | Below is the instruction that describes the task:
### Input:
Create a new Ooid for a given time, to be stored at a given depth
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If none, use the defaultDepth
returns a new opaque id string holding 24 random hex digits and encoded date and depth info
### Response:
def createNewOoid(timestamp=None, depth=None):
"""Create a new Ooid for a given time, to be stored at a given depth
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If none, use the defaultDepth
returns a new opaque id string holding 24 random hex digits and encoded date and depth info
"""
if not timestamp:
timestamp = utc_now().date()
if not depth:
depth = defaultDepth
assert depth <= 4 and depth >=1
uuid = str(uu.uuid4())
return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day) |
def synonyms(self):
"""Return a dict of term synonyms"""
syns = {}
for k, v in self._declared_terms.items():
k = k.strip()
if v.get('synonym'):
syns[k.lower()] = v['synonym']
if not '.' in k:
syns[ROOT_TERM + '.' + k.lower()] = v['synonym']
return syns | Return a dict of term synonyms | Below is the the instruction that describes the task:
### Input:
Return a dict of term synonyms
### Response:
def synonyms(self):
"""Return a dict of term synonyms"""
syns = {}
for k, v in self._declared_terms.items():
k = k.strip()
if v.get('synonym'):
syns[k.lower()] = v['synonym']
if not '.' in k:
syns[ROOT_TERM + '.' + k.lower()] = v['synonym']
return syns |
def reassign(self, user_ids, requester):
"""Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign
"""
path = '{0}'.format(self.collection.name)
assignments = []
if not user_ids:
raise Error('Must pass at least one user id')
for user_id in user_ids:
ref = {
"assignee": {
"id": user_id,
"type": "user_reference"
}
}
assignments.append(ref)
data = {
"incidents": [
{
"id": self.id,
"type": "incident_reference",
"assignments": assignments
}
]
}
extra_headers = {"From": requester}
return self.pagerduty.request('PUT', path, data=_json_dumper(data), extra_headers=extra_headers) | Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign | Below is the instruction that describes the task:
### Input:
Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign
### Response:
def reassign(self, user_ids, requester):
"""Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign
"""
path = '{0}'.format(self.collection.name)
assignments = []
if not user_ids:
raise Error('Must pass at least one user id')
for user_id in user_ids:
ref = {
"assignee": {
"id": user_id,
"type": "user_reference"
}
}
assignments.append(ref)
data = {
"incidents": [
{
"id": self.id,
"type": "incident_reference",
"assignments": assignments
}
]
}
extra_headers = {"From": requester}
return self.pagerduty.request('PUT', path, data=_json_dumper(data), extra_headers=extra_headers) |
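A hedged sketch of calling this through the same wrapper; the incident lookup helper, ids and e-mail address are assumptions:
incident = pagerduty.incidents.show("PABC123")           # assumed lookup on the client
incident.reassign(["PUSER01", "PUSER02"],
                  requester="oncall-lead@example.com")   # both users end up assigned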
def modify_signature(self, signature):
""" Modify an existing signature
Can modify the content, contenttype and name. An unset attribute will
not delete the attribute but leave it untouched.
:param: signature a zobject.Signature object, with modified
content/contentype/name, the id should be present and
valid, the name alone cannot be used to identify the
signature for that operation.
"""
# if no content is specified, just use a selector (id/name)
dic = signature.to_creator(for_modify=True)
self.request('ModifySignature', {'signature': dic}) | Modify an existing signature
Can modify the content, contenttype and name. An unset attribute will
not delete the attribute but leave it untouched.
:param: signature a zobject.Signature object, with modified
content/contentype/name, the id should be present and
valid, the name alone cannot be used to identify the
signature for that operation. | Below is the instruction that describes the task:
### Input:
Modify an existing signature
Can modify the content, contenttype and name. An unset attribute will
not delete the attribute but leave it untouched.
:param: signature a zobject.Signature object, with modified
content/contentype/name, the id should be present and
valid, the name alone cannot be used to identify the
signature for that operation.
### Response:
def modify_signature(self, signature):
""" Modify an existing signature
Can modify the content, contenttype and name. An unset attribute will
not delete the attribute but leave it untouched.
:param: signature a zobject.Signature object, with modified
content/contentype/name, the id should be present and
valid, the name alone cannot be used to identify the
signature for that operation.
"""
# if no content is specified, just use a selector (id/name)
dic = signature.to_creator(for_modify=True)
self.request('ModifySignature', {'signature': dic}) |
def _is_chunk_markdown(source):
"""Return whether a chunk contains Markdown contents."""
lines = source.splitlines()
if all(line.startswith('# ') for line in lines):
# The chunk is a Markdown *unless* it is commented Python code.
source = '\n'.join(line[2:] for line in lines
if not line[2:].startswith('#')) # skip headers
if not source:
return True
# Try to parse the chunk: if it fails, it is Markdown, otherwise,
# it is Python.
return not _is_python(source)
return False | Return whether a chunk contains Markdown contents. | Below is the the instruction that describes the task:
### Input:
Return whether a chunk contains Markdown contents.
### Response:
def _is_chunk_markdown(source):
"""Return whether a chunk contains Markdown contents."""
lines = source.splitlines()
if all(line.startswith('# ') for line in lines):
# The chunk is a Markdown *unless* it is commented Python code.
source = '\n'.join(line[2:] for line in lines
if not line[2:].startswith('#')) # skip headers
if not source:
return True
# Try to parse the chunk: if it fails, it is Markdown, otherwise,
# it is Python.
return not _is_python(source)
return False |
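A behaviour sketch of the classifier above: fully commented prose counts as Markdown, while commented-out code that still parses as Python does not.
print(_is_chunk_markdown("# This chunk is *prose* rendered as Markdown."))  # True
print(_is_chunk_markdown("# x = 1\n# y = x + 1"))   # False: the uncommented body parses as Python
print(_is_chunk_markdown("x = 1"))                  # False: not every line starts with '# '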
def start_output (self):
"""Write start of checking info as DOT comment."""
super(DOTLogger, self).start_output()
if self.has_part("intro"):
self.write_intro()
self.writeln()
self.writeln(u"digraph G {")
self.writeln(u" graph [")
self.writeln(u" charset=\"%s\"," % self.get_charset_encoding())
self.writeln(u" ];")
self.flush() | Write start of checking info as DOT comment. | Below is the the instruction that describes the task:
### Input:
Write start of checking info as DOT comment.
### Response:
def start_output (self):
"""Write start of checking info as DOT comment."""
super(DOTLogger, self).start_output()
if self.has_part("intro"):
self.write_intro()
self.writeln()
self.writeln(u"digraph G {")
self.writeln(u" graph [")
self.writeln(u" charset=\"%s\"," % self.get_charset_encoding())
self.writeln(u" ];")
self.flush() |
def arp(interface='', ipaddr='', macaddr='', **kwargs): # pylint: disable=unused-argument
'''
NAPALM returns a list of dictionaries with details of the ARP entries.
:param interface: interface name to filter on
:param ipaddr: IP address to filter on
:param macaddr: MAC address to filter on
:return: List of the entries in the ARP table
CLI Example:
.. code-block:: bash
salt '*' net.arp
salt '*' net.arp macaddr='5c:5e:ab:da:3c:f0'
Example output:
.. code-block:: python
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
'''
proxy_output = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_arp_table',
**{
}
)
if not proxy_output.get('result'):
return proxy_output
arp_table = proxy_output.get('out')
if interface:
arp_table = _filter_list(arp_table, 'interface', interface)
if ipaddr:
arp_table = _filter_list(arp_table, 'ip', ipaddr)
if macaddr:
arp_table = _filter_list(arp_table, 'mac', macaddr)
proxy_output.update({
'out': arp_table
})
return proxy_output | NAPALM returns a list of dictionaries with details of the ARP entries.
:param interface: interface name to filter on
:param ipaddr: IP address to filter on
:param macaddr: MAC address to filter on
:return: List of the entries in the ARP table
CLI Example:
.. code-block:: bash
salt '*' net.arp
salt '*' net.arp macaddr='5c:5e:ab:da:3c:f0'
Example output:
.. code-block:: python
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
] | Below is the instruction that describes the task:
### Input:
NAPALM returns a list of dictionaries with details of the ARP entries.
:param interface: interface name to filter on
:param ipaddr: IP address to filter on
:param macaddr: MAC address to filter on
:return: List of the entries in the ARP table
CLI Example:
.. code-block:: bash
salt '*' net.arp
salt '*' net.arp macaddr='5c:5e:ab:da:3c:f0'
Example output:
.. code-block:: python
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
### Response:
def arp(interface='', ipaddr='', macaddr='', **kwargs): # pylint: disable=unused-argument
'''
NAPALM returns a list of dictionaries with details of the ARP entries.
:param interface: interface name to filter on
:param ipaddr: IP address to filter on
:param macaddr: MAC address to filter on
:return: List of the entries in the ARP table
CLI Example:
.. code-block:: bash
salt '*' net.arp
salt '*' net.arp macaddr='5c:5e:ab:da:3c:f0'
Example output:
.. code-block:: python
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
'''
proxy_output = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_arp_table',
**{
}
)
if not proxy_output.get('result'):
return proxy_output
arp_table = proxy_output.get('out')
if interface:
arp_table = _filter_list(arp_table, 'interface', interface)
if ipaddr:
arp_table = _filter_list(arp_table, 'ip', ipaddr)
if macaddr:
arp_table = _filter_list(arp_table, 'mac', macaddr)
proxy_output.update({
'out': arp_table
})
return proxy_output |
def filter_single_grain(self):
'''
This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
there, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine
'''
my_index = 0
my_grains = [['Index','Label','Type','Group','Meteorite','Mineralogy','C12/C13','d(Si29/Si30)','d(Si30/Si29)']]
# add the data to this grain list
for it in range(len(self.data)):
my_grains.append([my_index,self.desc[it][self.descdict['Grain Label']], self.desc[it][self.descdict['Type']], self.desc[it][self.descdict['Group']], self.desc[it][self.descdict['Meteorite']], self.desc[it][self.descdict['Mineralogy']], self.data[it][self.datadict['12c/13c']], self.data[it][self.datadict['d(29si/28si)']], self.data[it][self.datadict['d(30si/28si)']]])
my_index += 1
for prt_line in my_grains:
print(prt_line)
# now write the selector for the index of the grains to select which one should be
# available and which ones should be dumped
usr_input = ''
usr_input = input('Select the grains by index that you want to use. Please separate the indices by a comma, e.g., 1 or 0,2,3,4\n')
# process user index
if usr_input == '':
print('No data selected to filter.')
return None
elif len(usr_input) == 1:
usr_index = [usr_input]
else:
usr_index = usr_input.split(',')
for it in range(len(usr_index)):
usr_index[it] = int(usr_index[it])
# filter
desc_tmp = np.zeros((len(usr_index),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(usr_index),len(self.header_data)))
style_tmp= np.zeros((len(usr_index),len(self.header_style)),dtype='|S1024')
for i in range(len(usr_index)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[usr_index[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[usr_index[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[usr_index[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp | This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
those, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine | Below is the the instruction that describes the task:
### Input:
This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
those, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine
### Response:
def filter_single_grain(self):
'''
This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
those, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine
'''
my_index = 0
my_grains = [['Index','Label','Type','Group','Meteorite','Mineralogy','C12/C13','d(Si29/Si30)','d(Si30/Si29)']]
# add the data to this grain list
for it in range(len(self.data)):
my_grains.append([my_index,self.desc[it][self.descdict['Grain Label']], self.desc[it][self.descdict['Type']], self.desc[it][self.descdict['Group']], self.desc[it][self.descdict['Meteorite']], self.desc[it][self.descdict['Mineralogy']], self.data[it][self.datadict['12c/13c']], self.data[it][self.datadict['d(29si/28si)']], self.data[it][self.datadict['d(30si/28si)']]])
my_index += 1
for prt_line in my_grains:
print(prt_line)
# now write the selector for the index of the grains to select which one should be
# available and which ones should be dumped
usr_input = ''
usr_input = input('Select the grains by index that you want to use. Please separate the indices by a comma, e.g., 1 or 0,2,3,4\n')
# process user index
if usr_input == '':
print('No data selected to filter.')
return None
elif len(usr_input) == 1:
usr_index = [usr_input]
else:
usr_index = usr_input.split(',')
for it in range(len(usr_index)):
usr_index[it] = int(usr_index[it])
# filter
desc_tmp = np.zeros((len(usr_index),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(usr_index),len(self.header_data)))
style_tmp= np.zeros((len(usr_index),len(self.header_style)),dtype='|S1024')
for i in range(len(usr_index)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[usr_index[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[usr_index[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[usr_index[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp |
def blastnprep(self):
"""Setup blastn analyses"""
# Populate threads for each gene, genome combination
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
#
# sample[self.analysistype].alleleresults = GenObject()
sample[self.analysistype].closealleles = dict()
sample[self.analysistype].mismatches = dict()
sample[self.analysistype].alignmentlength = dict()
sample[self.analysistype].subjectlength = dict()
sample[self.analysistype].queryid = dict()
sample[self.analysistype].start = dict()
sample[self.analysistype].end = dict()
sample[self.analysistype].queryseq = dict()
if type(sample[self.analysistype].allelenames) == list:
for allele in sample[self.analysistype].combinedalleles:
# Add each fasta/allele file combination to the threads
self.runblast(sample.general.bestassemblyfile, allele, sample) | Setup blastn analyses | Below is the the instruction that describes the task:
### Input:
Setup blastn analyses
### Response:
def blastnprep(self):
"""Setup blastn analyses"""
# Populate threads for each gene, genome combination
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
#
# sample[self.analysistype].alleleresults = GenObject()
sample[self.analysistype].closealleles = dict()
sample[self.analysistype].mismatches = dict()
sample[self.analysistype].alignmentlength = dict()
sample[self.analysistype].subjectlength = dict()
sample[self.analysistype].queryid = dict()
sample[self.analysistype].start = dict()
sample[self.analysistype].end = dict()
sample[self.analysistype].queryseq = dict()
if type(sample[self.analysistype].allelenames) == list:
for allele in sample[self.analysistype].combinedalleles:
# Add each fasta/allele file combination to the threads
self.runblast(sample.general.bestassemblyfile, allele, sample) |
def _IsRetryable(error):
"""Returns whether error is likely to be retryable."""
if not isinstance(error, MySQLdb.OperationalError):
return False
if not error.args:
return False
code = error.args[0]
return code in _RETRYABLE_ERRORS | Returns whether error is likely to be retryable. | Below is the the instruction that describes the task:
### Input:
Returns whether error is likely to be retryable.
### Response:
def _IsRetryable(error):
"""Returns whether error is likely to be retryable."""
if not isinstance(error, MySQLdb.OperationalError):
return False
if not error.args:
return False
code = error.args[0]
return code in _RETRYABLE_ERRORS |
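A minimal sketch of the intended call pattern, assuming a MySQLdb (mysqlclient) connection; execute_with_one_retry, cursor and query are placeholder names and the single retry is for illustration only:

    import MySQLdb

    def execute_with_one_retry(cursor, query):
        # Retry exactly once when the error code is in _RETRYABLE_ERRORS, otherwise re-raise.
        try:
            cursor.execute(query)
        except MySQLdb.OperationalError as error:
            if _IsRetryable(error):
                cursor.execute(query)
            else:
                raise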
def helper_parallel_lines(start0, end0, start1, end1, filename):
"""Image for :func:`.parallel_lines_parameters` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
points = stack1d(start0, end0, start1, end1)
ax.plot(points[0, :2], points[1, :2], marker="o")
ax.plot(points[0, 2:], points[1, 2:], marker="o")
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(figure, filename) | Image for :func:`.parallel_lines_parameters` docstring. | Below is the the instruction that describes the task:
### Input:
Image for :func:`.parallel_lines_parameters` docstring.
### Response:
def helper_parallel_lines(start0, end0, start1, end1, filename):
"""Image for :func:`.parallel_lines_parameters` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
points = stack1d(start0, end0, start1, end1)
ax.plot(points[0, :2], points[1, :2], marker="o")
ax.plot(points[0, 2:], points[1, 2:], marker="o")
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(figure, filename) |
def add_linguistic_type(self, lingtype, constraints=None,
timealignable=True, graphicreferences=False,
extref=None, param_dict=None):
"""Add a linguistic type.
:param str lingtype: Name of the linguistic type.
:param str constraints: Constraint name.
:param bool timealignable: Flag for time alignable.
:param bool graphicreferences: Flag for graphic references.
:param str extref: External reference.
:param dict param_dict: TAG attributes, when this is not ``None`` it
will ignore all other options. Please only use
dictionaries coming from the
:func:`get_parameters_for_linguistic_type`
:raises KeyError: If a constraint is not defined
"""
if param_dict:
self.linguistic_types[lingtype] = param_dict
else:
if constraints:
self.constraints[constraints]
self.linguistic_types[lingtype] = {
'LINGUISTIC_TYPE_ID': lingtype,
'TIME_ALIGNABLE': str(timealignable).lower(),
'GRAPHIC_REFERENCES': str(graphicreferences).lower(),
'CONSTRAINTS': constraints}
if extref is not None:
self.linguistic_types[lingtype]['EXT_REF'] = extref | Add a linguistic type.
:param str lingtype: Name of the linguistic type.
:param str constraints: Constraint name.
:param bool timealignable: Flag for time alignable.
:param bool graphicreferences: Flag for graphic references.
:param str extref: External reference.
:param dict param_dict: TAG attributes, when this is not ``None`` it
will ignore all other options. Please only use
dictionaries coming from the
:func:`get_parameters_for_linguistic_type`
:raises KeyError: If a constraint is not defined | Below is the the instruction that describes the task:
### Input:
Add a linguistic type.
:param str lingtype: Name of the linguistic type.
:param str constraints: Constraint name.
:param bool timealignable: Flag for time alignable.
:param bool graphicreferences: Flag for graphic references.
:param str extref: External reference.
:param dict param_dict: TAG attributes, when this is not ``None`` it
will ignore all other options. Please only use
dictionaries coming from the
:func:`get_parameters_for_linguistic_type`
:raises KeyError: If a constraint is not defined
### Response:
def add_linguistic_type(self, lingtype, constraints=None,
timealignable=True, graphicreferences=False,
extref=None, param_dict=None):
"""Add a linguistic type.
:param str lingtype: Name of the linguistic type.
:param str constraints: Constraint name.
:param bool timealignable: Flag for time alignable.
:param bool graphicreferences: Flag for graphic references.
:param str extref: External reference.
:param dict param_dict: TAG attributes, when this is not ``None`` it
will ignore all other options. Please only use
dictionaries coming from the
:func:`get_parameters_for_linguistic_type`
:raises KeyError: If a constraint is not defined
"""
if param_dict:
self.linguistic_types[lingtype] = param_dict
else:
if constraints:
self.constraints[constraints]
self.linguistic_types[lingtype] = {
'LINGUISTIC_TYPE_ID': lingtype,
'TIME_ALIGNABLE': str(timealignable).lower(),
'GRAPHIC_REFERENCES': str(graphicreferences).lower(),
'CONSTRAINTS': constraints}
if extref is not None:
self.linguistic_types[lingtype]['EXT_REF'] = extref |
async def _next_event(self):
"""
Gets the next event.
"""
while True:
for event in self._connection.events():
if isinstance(event, Message):
# check if we need to buffer
if event.message_finished:
return self._wrap_data(self._gather_buffers(event))
self._buffer(event)
break # exit for loop
else:
return event
data = await self._sock.receive_some(4096)
if not data:
return CloseConnection(code=500, reason="Socket closed")
self._connection.receive_data(data) | Gets the next event. | Below is the the instruction that describes the task:
### Input:
Gets the next event.
### Response:
async def _next_event(self):
"""
Gets the next event.
"""
while True:
for event in self._connection.events():
if isinstance(event, Message):
# check if we need to buffer
if event.message_finished:
return self._wrap_data(self._gather_buffers(event))
self._buffer(event)
break # exit for loop
else:
return event
data = await self._sock.receive_some(4096)
if not data:
return CloseConnection(code=500, reason="Socket closed")
self._connection.receive_data(data) |
def fit_transform(self, Xs=None, ys=None, Xt=None, yt=None):
"""Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt) and transports source samples Xs onto target
ones Xt
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
transp_Xs : array-like, shape (n_source_samples, n_features)
The transported source samples.
"""
return self.fit(Xs, ys, Xt, yt).transform(Xs, ys, Xt, yt) | Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt) and transports source samples Xs onto target
ones Xt
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
transp_Xs : array-like, shape (n_source_samples, n_features)
The transported source samples. | Below is the the instruction that describes the task:
### Input:
Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt) and transports source samples Xs onto target
ones Xt
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
transp_Xs : array-like, shape (n_source_samples, n_features)
The transported source samples.
### Response:
def fit_transform(self, Xs=None, ys=None, Xt=None, yt=None):
"""Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt) and transports source samples Xs onto target
ones Xt
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
transp_Xs : array-like, shape (n_source_samples, n_features)
The transported source samples.
"""
return self.fit(Xs, ys, Xt, yt).transform(Xs, ys, Xt, yt) |
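An illustrative usage sketch with random placeholder data; SinkhornTransport is one concrete transport class in the POT (ot) package that exposes this fit/transform interface:

    import numpy as np
    import ot

    Xs = np.random.randn(100, 2)           # source samples
    Xt = np.random.randn(80, 2) + 3.0      # target samples, shifted by an offset
    mapper = ot.da.SinkhornTransport(reg_e=1e-1)
    transp_Xs = mapper.fit_transform(Xs=Xs, Xt=Xt)   # source samples mapped onto the target domain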
def normalize(self):
"""Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
>>> P = P.normalize()
>>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])
0.350 0.650
"""
total = float(sum(self.prob.values()))
if not (1.0-epsilon < total < 1.0+epsilon):
for val in self.prob:
self.prob[val] /= total
return self | Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
>>> P = P.normalize()
>>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])
0.350 0.650 | Below is the the instruction that describes the task:
### Input:
Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
>>> P = P.normalize()
>>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])
0.350 0.650
### Response:
def normalize(self):
"""Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
>>> P = P.normalize()
>>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])
0.350 0.650
"""
total = float(sum(self.prob.values()))
if not (1.0-epsilon < total < 1.0+epsilon):
for val in self.prob:
self.prob[val] /= total
return self |
def corrArray(self, inputArray):
"""#TODO: docstring
:param inputArray: #TODO: docstring
:returns: #TODO docstring
"""
outputArray = numpy.vstack([numpy.nan_to_num(currSpline(inputArray))
for currSpline in self.splines
]).mean(axis=0)
return outputArray | #TODO: docstring
:param inputArray: #TODO: docstring
:returns: #TODO docstring | Below is the the instruction that describes the task:
### Input:
#TODO: docstring
:param inputArray: #TODO: docstring
:returns: #TODO docstring
### Response:
def corrArray(self, inputArray):
"""#TODO: docstring
:param inputArray: #TODO: docstring
:returns: #TODO docstring
"""
outputArray = numpy.vstack([numpy.nan_to_num(currSpline(inputArray))
for currSpline in self.splines
]).mean(axis=0)
return outputArray |
def free_param_names(self):
"""Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
"""
p = CombinedBounds(self.k.free_param_names, self.noise_k.free_param_names)
if self.mu is not None:
p = CombinedBounds(p, self.mu.free_param_names)
return p | Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function. | Below is the the instruction that describes the task:
### Input:
Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
### Response:
def free_param_names(self):
"""Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
"""
p = CombinedBounds(self.k.free_param_names, self.noise_k.free_param_names)
if self.mu is not None:
p = CombinedBounds(p, self.mu.free_param_names)
return p |
def iterprogress( sized_iterable ):
"""
Iterate something printing progress bar to stdout
"""
pb = ProgressBar( 0, len( sized_iterable ) )
for i, value in enumerate( sized_iterable ):
yield value
pb.update_and_print( i, sys.stderr ) | Iterate something printing progress bar to stdout | Below is the the instruction that describes the task:
### Input:
Iterate something printing progress bar to stdout
### Response:
def iterprogress( sized_iterable ):
"""
Iterate something printing progress bar to stdout
"""
pb = ProgressBar( 0, len( sized_iterable ) )
for i, value in enumerate( sized_iterable ):
yield value
pb.update_and_print( i, sys.stderr ) |
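A small usage sketch, assuming iterprogress and its ProgressBar dependency are importable from the surrounding module:

    items = list(range(10000))
    total = 0
    for value in iterprogress(items):
        total += value        # the progress bar is updated as each item is yielded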
def getattr(self, path, fh):
"""Called by FUSE when the attributes for a file or directory are required.
Returns a dictionary with keys identical to the stat C structure of stat(2).
st_atime, st_mtime and st_ctime should be floats. On OSX, st_nlink should count
all files inside the directory. On Linux, only the subdirectories are counted.
The 'st_dev' and 'st_blksize' fields are ignored. The 'st_ino' field is ignored
except if the 'use_ino' mount option is given.
This method gets very heavy traffic.
"""
self._raise_error_if_os_special_file(path)
# log.debug(u'getattr(): {0}'.format(path))
attribute = self._get_attributes_through_cache(path)
# log.debug('getattr() returned attribute: {0}'.format(attribute))
return self._stat_from_attributes(attribute) | Called by FUSE when the attributes for a file or directory are required.
Returns a dictionary with keys identical to the stat C structure of stat(2).
st_atime, st_mtime and st_ctime should be floats. On OSX, st_nlink should count
all files inside the directory. On Linux, only the subdirectories are counted.
The 'st_dev' and 'st_blksize' fields are ignored. The 'st_ino' field is ignored
except if the 'use_ino' mount option is given.
This method gets very heavy traffic. | Below is the the instruction that describes the task:
### Input:
Called by FUSE when the attributes for a file or directory are required.
Returns a dictionary with keys identical to the stat C structure of stat(2).
st_atime, st_mtime and st_ctime should be floats. On OSX, st_nlink should count
all files inside the directory. On Linux, only the subdirectories are counted.
The 'st_dev' and 'st_blksize' fields are ignored. The 'st_ino' field is ignored
except if the 'use_ino' mount option is given.
This method gets very heavy traffic.
### Response:
def getattr(self, path, fh):
"""Called by FUSE when the attributes for a file or directory are required.
Returns a dictionary with keys identical to the stat C structure of stat(2).
st_atime, st_mtime and st_ctime should be floats. On OSX, st_nlink should count
all files inside the directory. On Linux, only the subdirectories are counted.
The 'st_dev' and 'st_blksize' fields are ignored. The 'st_ino' field is ignored
except if the 'use_ino' mount option is given.
This method gets very heavy traffic.
"""
self._raise_error_if_os_special_file(path)
# log.debug(u'getattr(): {0}'.format(path))
attribute = self._get_attributes_through_cache(path)
# log.debug('getattr() returned attribute: {0}'.format(attribute))
return self._stat_from_attributes(attribute) |
def init_app(self, app, **kwargs):
""" Initializes the Flask-Bouncer extension for the specified application.
:param app: The application.
"""
self.app = app
self._init_extension()
self.app.before_request(self.check_implicit_rules)
if kwargs.get('ensure_authorization', False):
self.app.after_request(self.check_authorization) | Initializes the Flask-Bouncer extension for the specified application.
:param app: The application. | Below is the the instruction that describes the task:
### Input:
Initializes the Flask-Bouncer extension for the specified application.
:param app: The application.
### Response:
def init_app(self, app, **kwargs):
""" Initializes the Flask-Bouncer extension for the specified application.
:param app: The application.
"""
self.app = app
self._init_extension()
self.app.before_request(self.check_implicit_rules)
if kwargs.get('ensure_authorization', False):
self.app.after_request(self.check_authorization) |
def get_source_for(self, asm_offset, runtime=True):
""" Solidity source code snippet related to `asm_pos` evm bytecode offset.
If runtime is False, initialization bytecode source map is used
"""
srcmap = self.get_srcmap(runtime)
try:
beg, size, _, _ = srcmap[asm_offset]
except KeyError:
#asm_offset pointing outside the known bytecode
return ''
output = ''
nl = self.source_code[:beg].count('\n') + 1
snippet = self.source_code[beg:beg + size]
for l in snippet.split('\n'):
output += ' %s %s\n' % (nl, l)
nl += 1
return output | Solidity source code snippet related to `asm_offset` evm bytecode offset.
If runtime is False, initialization bytecode source map is used | Below is the the instruction that describes the task:
### Input:
Solidity source code snippet related to `asm_offset` evm bytecode offset.
If runtime is False, initialization bytecode source map is used
### Response:
def get_source_for(self, asm_offset, runtime=True):
""" Solidity source code snippet related to `asm_pos` evm bytecode offset.
If runtime is False, initialization bytecode source map is used
"""
srcmap = self.get_srcmap(runtime)
try:
beg, size, _, _ = srcmap[asm_offset]
except KeyError:
#asm_offset pointing outside the known bytecode
return ''
output = ''
nl = self.source_code[:beg].count('\n') + 1
snippet = self.source_code[beg:beg + size]
for l in snippet.split('\n'):
output += ' %s %s\n' % (nl, l)
nl += 1
return output |
def get_group_list(user, include_default=True):
'''
Returns a list of all of the system group names of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
return []
group_names = None
ugroups = set()
if hasattr(os, 'getgrouplist'):
# Try os.getgrouplist, available in python >= 3.3
log.trace('Trying os.getgrouplist for \'%s\'', user)
try:
group_names = [
grp.getgrgid(grpid).gr_name for grpid in
os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception:
pass
elif HAS_PYSSS:
# Try pysss.getgrouplist
log.trace('Trying pysss.getgrouplist for \'%s\'', user)
try:
group_names = list(pysss.getgrouplist(user))
except Exception:
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to match behavior of
# os.getgrouplist() and pysss.getgrouplist()
log.trace('Trying generic group list for \'%s\'', user)
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = get_default_group(user)
if default_group not in group_names:
group_names.append(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
if group_names is not None:
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
# supplemental groups, so include_default=False omits the users
# default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace('Group list for user \'%s\': %s', user, sorted(ugroups))
return sorted(ugroups) | Returns a list of all of the system group names of which the user
is a member. | Below is the the instruction that describes the task:
### Input:
Returns a list of all of the system group names of which the user
is a member.
### Response:
def get_group_list(user, include_default=True):
'''
Returns a list of all of the system group names of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
return []
group_names = None
ugroups = set()
if hasattr(os, 'getgrouplist'):
# Try os.getgrouplist, available in python >= 3.3
log.trace('Trying os.getgrouplist for \'%s\'', user)
try:
group_names = [
grp.getgrgid(grpid).gr_name for grpid in
os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception:
pass
elif HAS_PYSSS:
# Try pysss.getgrouplist
log.trace('Trying pysss.getgrouplist for \'%s\'', user)
try:
group_names = list(pysss.getgrouplist(user))
except Exception:
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to match behavior of
# os.getgrouplist() and pysss.getgrouplist()
log.trace('Trying generic group list for \'%s\'', user)
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = get_default_group(user)
if default_group not in group_names:
group_names.append(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
if group_names is not None:
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
# supplemental groups, so include_default=False omits the users
# default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace('Group list for user \'%s\': %s', user, sorted(ugroups))
return sorted(ugroups) |
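A usage sketch; 'root' is only a placeholder account name:

    all_groups = get_group_list('root')                            # includes the user's default group
    supplemental = get_group_list('root', include_default=False)   # default group omitted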
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width) | Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop. | Below is the the instruction that describes the task:
### Input:
Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
### Response:
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width) |
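A worked sketch with made-up numbers, assuming the normalized (x_min, y_min, x_max, y_max) bbox convention used by normalize_bbox/denormalize_bbox:

    bbox = (0.1, 0.3, 0.4, 0.6)   # normalized bbox on a 100 x 200 (rows x cols) image
    cropped = crop_bbox_by_coords(bbox,
                                  crop_coords=(10, 20, 90, 70),   # x1, y1, x2, y2 of the crop in pixels
                                  crop_height=50, crop_width=80,
                                  rows=100, cols=200)
    # `cropped` is the same box re-normalized to the 50 x 80 crop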
def find_ports(device):
"""
Find the port chain a device is plugged on.
This is done by searching sysfs for a device that matches the device
bus/address combination.
Useful when the underlying usb lib does not return device.port_number for
whatever reason.
"""
bus_id = device.bus
dev_id = device.address
for dirent in os.listdir(USB_SYS_PREFIX):
matches = re.match(USB_PORTS_STR + '$', dirent)
if matches:
bus_str = readattr(dirent, 'busnum')
if bus_str:
busnum = float(bus_str)
else:
busnum = None
dev_str = readattr(dirent, 'devnum')
if dev_str:
devnum = float(dev_str)
else:
devnum = None
if busnum == bus_id and devnum == dev_id:
return str(matches.groups()[1]) | Find the port chain a device is plugged on.
This is done by searching sysfs for a device that matches the device
bus/address combination.
Useful when the underlying usb lib does not return device.port_number for
whatever reason. | Below is the the instruction that describes the task:
### Input:
Find the port chain a device is plugged on.
This is done by searching sysfs for a device that matches the device
bus/address combination.
Useful when the underlying usb lib does not return device.port_number for
whatever reason.
### Response:
def find_ports(device):
"""
Find the port chain a device is plugged on.
This is done by searching sysfs for a device that matches the device
bus/address combination.
Useful when the underlying usb lib does not return device.port_number for
whatever reason.
"""
bus_id = device.bus
dev_id = device.address
for dirent in os.listdir(USB_SYS_PREFIX):
matches = re.match(USB_PORTS_STR + '$', dirent)
if matches:
bus_str = readattr(dirent, 'busnum')
if bus_str:
busnum = float(bus_str)
else:
busnum = None
dev_str = readattr(dirent, 'devnum')
if dev_str:
devnum = float(dev_str)
else:
devnum = None
if busnum == bus_id and devnum == dev_id:
return str(matches.groups()[1]) |
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker) | Unregister a checker instance. | Below is the the instruction that describes the task:
### Input:
Unregister a checker instance.
### Response:
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker) |
def match(self, search, **kwargs):
"""
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.match_with_http_info(search, **kwargs)
else:
(data) = self.match_with_http_info(search, **kwargs)
return data | Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
### Response:
def match(self, search, **kwargs):
"""
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.match_with_http_info(search, **kwargs)
else:
(data) = self.match_with_http_info(search, **kwargs)
return data |
def calculate_integral(self, T1, T2, method):
r'''Method to calculate the integral of a property with respect to
temperature, using a specified method. Uses SciPy's `quad` function
to perform the integral, with no options.
This method can be overwritten by subclasses who may prefer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
'''
return float(quad(self.calculate, T1, T2, args=(method))[0]) | r'''Method to calculate the integral of a property with respect to
temperature, using a specified method. Uses SciPy's `quad` function
to perform the integral, with no options.
This method can be overwritten by subclasses who may prefer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`] | Below is the the instruction that describes the task:
### Input:
r'''Method to calculate the integral of a property with respect to
temperature, using a specified method. Uses SciPy's `quad` function
to perform the integral, with no options.
This method can be overwritten by subclasses who may prefer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
### Response:
def calculate_integral(self, T1, T2, method):
r'''Method to calculate the integral of a property with respect to
temperature, using a specified method. Uses SciPy's `quad` function
to perform the integral, with no options.
This method can be overwritten by subclasses who may prefer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
'''
return float(quad(self.calculate, T1, T2, args=(method))[0]) |
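A hedged usage sketch; prop stands for any property object exposing this method together with a calculate(T, method) callable, and 'POLING' is a placeholder method name:

    # Integral of the property between 300 K and 400 K, in units*K
    integral = prop.calculate_integral(300.0, 400.0, 'POLING')

    # Equivalent direct integration of prop.calculate with SciPy
    from scipy.integrate import quad
    integral = float(quad(prop.calculate, 300.0, 400.0, args=('POLING',))[0])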
def masktorgb(mask, color='lightgreen', alpha=1.0):
"""Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
"""
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError('ndim={0} is not supported'.format(mask.ndim))
ht, wd = mask.shape
r, g, b = colors.lookup_color(color)
rgbobj = RGBImage(data_np=np.zeros((ht, wd, 4), dtype=np.uint8))
rc = rgbobj.get_slice('R')
gc = rgbobj.get_slice('G')
bc = rgbobj.get_slice('B')
ac = rgbobj.get_slice('A')
ac[:] = 0 # Transparent background
rc[mask] = int(r * 255)
gc[mask] = int(g * 255)
bc[mask] = int(b * 255)
ac[mask] = int(alpha * 255)
# For debugging
#rgbobj.save_as_file('ztmp_rgbobj.png')
return rgbobj | Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension. | Below is the the instruction that describes the task:
### Input:
Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
### Response:
def masktorgb(mask, color='lightgreen', alpha=1.0):
"""Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
"""
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError('ndim={0} is not supported'.format(mask.ndim))
ht, wd = mask.shape
r, g, b = colors.lookup_color(color)
rgbobj = RGBImage(data_np=np.zeros((ht, wd, 4), dtype=np.uint8))
rc = rgbobj.get_slice('R')
gc = rgbobj.get_slice('G')
bc = rgbobj.get_slice('B')
ac = rgbobj.get_slice('A')
ac[:] = 0 # Transparent background
rc[mask] = int(r * 255)
gc[mask] = int(g * 255)
bc[mask] = int(b * 255)
ac[mask] = int(alpha * 255)
# For debugging
#rgbobj.save_as_file('ztmp_rgbobj.png')
return rgbobj |
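A short usage sketch, assuming the Ginga dependencies (RGBImage, colors) imported by the surrounding module are available:

    import numpy as np

    mask = np.zeros((10, 10), dtype=bool)
    mask[2:5, 2:5] = True
    overlay = masktorgb(mask, color='red', alpha=0.5)
    # only the masked pixels are opaque; the rest of the overlay stays fully transparent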
def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000):
""" Vel factory function """
block_dict = {
'basic': BasicBlock,
'bottleneck': Bottleneck
}
def instantiate(**_):
return ResNetV2(block_dict[mode], blocks, inplanes=inplanes, divisor=divisor, num_classes=num_classes)
return ModelFactory.generic(instantiate) | Vel factory function | Below is the the instruction that describes the task:
### Input:
Vel factory function
### Response:
def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000):
""" Vel factory function """
block_dict = {
'basic': BasicBlock,
'bottleneck': Bottleneck
}
def instantiate(**_):
return ResNetV2(block_dict[mode], blocks, inplanes=inplanes, divisor=divisor, num_classes=num_classes)
return ModelFactory.generic(instantiate) |
def unicodify(filename):
"""Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode.
"""
if isinstance(filename, bytes):
return filename.decode(locale.getpreferredencoding())
else:
return filename | Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode. | Below is the the instruction that describes the task:
### Input:
Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode.
### Response:
def unicodify(filename):
"""Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode.
"""
if isinstance(filename, bytes):
return filename.decode(locale.getpreferredencoding())
else:
return filename |
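Two quick examples of the intended behaviour:

    unicodify(b'archive.tar.gz')   # bytes are decoded with the locale's preferred encoding
    unicodify('already-text')      # str input is returned unchanged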
def process_sub(ref, alt_str):
"""Process substitution"""
if len(ref) == len(alt_str):
if len(ref) == 1:
return record.Substitution(record.SNV, alt_str)
else:
return record.Substitution(record.MNV, alt_str)
elif len(ref) > len(alt_str):
return process_sub_grow(ref, alt_str)
else: # len(ref) < len(alt_str):
return process_sub_shrink(ref, alt_str) | Process substitution | Below is the the instruction that describes the task:
### Input:
Process substitution
### Response:
def process_sub(ref, alt_str):
"""Process substitution"""
if len(ref) == len(alt_str):
if len(ref) == 1:
return record.Substitution(record.SNV, alt_str)
else:
return record.Substitution(record.MNV, alt_str)
elif len(ref) > len(alt_str):
return process_sub_grow(ref, alt_str)
else: # len(ref) < len(alt_str):
return process_sub_shrink(ref, alt_str) |
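A hedged sketch of the expected outcomes, assuming the record module of the surrounding VCF parser is importable:

    process_sub('A', 'T')     # equal length, one base  -> record.Substitution(record.SNV, 'T')
    process_sub('AT', 'GC')   # equal length, two bases -> record.Substitution(record.MNV, 'GC')
    # unequal lengths are delegated to process_sub_grow / process_sub_shrink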
def _open(self, mode=None):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
Note: Copied from stdlib. Added option to override 'mode'
"""
if mode is None:
mode = self.mode
if self.encoding is None:
stream = open(self.baseFilename, mode)
else:
stream = codecs.open(self.baseFilename, mode, self.encoding)
return stream | Open the current base file with the (original) mode and encoding.
Return the resulting stream.
Note: Copied from stdlib. Added option to override 'mode' | Below is the the instruction that describes the task:
### Input:
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
Note: Copied from stdlib. Added option to override 'mode'
### Response:
def _open(self, mode=None):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
Note: Copied from stdlib. Added option to override 'mode'
"""
if mode is None:
mode = self.mode
if self.encoding is None:
stream = open(self.baseFilename, mode)
else:
stream = codecs.open(self.baseFilename, mode, self.encoding)
return stream |
def time_in_a_while(days=0, seconds=0, microseconds=0, milliseconds=0,
minutes=0, hours=0, weeks=0):
"""
format of timedelta:
timedelta([days[, seconds[, microseconds[, milliseconds[,
minutes[, hours[, weeks]]]]]]])
:return: UTC time
"""
delta = timedelta(days, seconds, microseconds, milliseconds,
minutes, hours, weeks)
return datetime.utcnow() + delta | format of timedelta:
timedelta([days[, seconds[, microseconds[, milliseconds[,
minutes[, hours[, weeks]]]]]]])
:return: UTC time | Below is the the instruction that describes the task:
### Input:
format of timedelta:
timedelta([days[, seconds[, microseconds[, milliseconds[,
minutes[, hours[, weeks]]]]]]])
:return: UTC time
### Response:
def time_in_a_while(days=0, seconds=0, microseconds=0, milliseconds=0,
minutes=0, hours=0, weeks=0):
"""
format of timedelta:
timedelta([days[, seconds[, microseconds[, milliseconds[,
minutes[, hours[, weeks]]]]]]])
:return: UTC time
"""
delta = timedelta(days, seconds, microseconds, milliseconds,
minutes, hours, weeks)
return datetime.utcnow() + delta |
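Quick examples of the offsets it produces:

    expires_at = time_in_a_while(minutes=30)         # UTC datetime 30 minutes from now
    window_end = time_in_a_while(days=1, hours=12)   # UTC datetime 36 hours from now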
def _get_bigquery_service(self):
"""
Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_
"""
logger.debug('Getting Google Credentials')
credentials = GoogleCredentials.get_application_default()
logger.debug('Building BigQuery service instance')
bigquery_service = build('bigquery', 'v2', credentials=credentials)
return bigquery_service | Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_ | Below is the the instruction that describes the task:
### Input:
Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_
### Response:
def _get_bigquery_service(self):
"""
Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_
"""
logger.debug('Getting Google Credentials')
credentials = GoogleCredentials.get_application_default()
logger.debug('Building BigQuery service instance')
bigquery_service = build('bigquery', 'v2', credentials=credentials)
return bigquery_service |
def simulate_diffusion(self, save_pos=False, total_emission=True,
radial=False, rs=None, seed=1, path='./',
wrap_func=wrap_periodic,
chunksize=2**19, chunkslice='times', verbose=True):
"""Simulate Brownian motion trajectories and emission rates.
This method performs the Brownian motion simulation using the current
set of parameters. Before running this method you can check the
disk-space requirements using :method:`print_sizes`.
Results are stored to disk in HDF5 format and are accessible
in `self.emission`, `self.emission_tot` and `self.position` as
pytables arrays.
Arguments:
save_pos (bool): if True, save the particles 3D trajectories
total_emission (bool): if True, store only the total emission array
containing the sum of emission of all the particles.
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
random state, otherwise is ignored.
wrap_func (function): the function used to apply the boundary
condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
path (string): a folder where simulation data is saved.
verbose (bool): if False, prints no output.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
self.open_store_traj(chunksize=chunksize, chunkslice=chunkslice,
radial=radial, path=path)
# Save current random state for reproducibility
self.traj_group._v_attrs['init_random_state'] = rs.get_state()
em_store = self.emission_tot if total_emission else self.emission
print('- Start trajectories simulation - %s' % ctime(), flush=True)
if verbose:
print('[PID %d] Diffusion time:' % os.getpid(), end='')
i_chunk = 0
t_chunk_size = self.emission.chunkshape[1]
chunk_duration = t_chunk_size * self.t_step
par_start_pos = self.particles.positions
prev_time = 0
for time_size in iter_chunksize(self.n_samples, t_chunk_size):
if verbose:
curr_time = int(chunk_duration * (i_chunk + 1))
if curr_time > prev_time:
print(' %ds' % curr_time, end='', flush=True)
prev_time = curr_time
POS, em = self._sim_trajectories(time_size, par_start_pos, rs,
total_emission=total_emission,
save_pos=save_pos, radial=radial,
wrap_func=wrap_func)
## Append em to the permanent storage
# if total_emission, data is just a linear array
# otherwise is a 2-D array (self.num_particles, c_size)
em_store.append(em)
if save_pos:
self.position.append(np.vstack(POS).astype('float32'))
i_chunk += 1
self.store.h5file.flush()
# Save current random state
self.traj_group._v_attrs['last_random_state'] = rs.get_state()
self.store.h5file.flush()
print('\n- End trajectories simulation - %s' % ctime(), flush=True) | Simulate Brownian motion trajectories and emission rates.
This method performs the Brownian motion simulation using the current
set of parameters. Before running this method you can check the
disk-space requirements using :method:`print_sizes`.
Results are stored to disk in HDF5 format and are accessible
in `self.emission`, `self.emission_tot` and `self.position` as
pytables arrays.
Arguments:
save_pos (bool): if True, save the particles 3D trajectories
total_emission (bool): if True, store only the total emission array
containing the sum of emission of all the particles.
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
random state, otherwise is ignored.
wrap_func (function): the function used to apply the boundary
condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
path (string): a folder where simulation data is saved.
verbose (bool): if False, prints no output. | Below is the the instruction that describes the task:
### Input:
Simulate Brownian motion trajectories and emission rates.
This method performs the Brownian motion simulation using the current
set of parameters. Before running this method you can check the
disk-space requirements using :method:`print_sizes`.
Results are stored to disk in HDF5 format and are accessible
in `self.emission`, `self.emission_tot` and `self.position` as
pytables arrays.
Arguments:
save_pos (bool): if True, save the particles 3D trajectories
total_emission (bool): if True, store only the total emission array
containing the sum of emission of all the particles.
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
random state, otherwise is ignored.
wrap_func (function): the function used to apply the boundary
condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
path (string): a folder where simulation data is saved.
verbose (bool): if False, prints no output.
### Response:
def simulate_diffusion(self, save_pos=False, total_emission=True,
radial=False, rs=None, seed=1, path='./',
wrap_func=wrap_periodic,
chunksize=2**19, chunkslice='times', verbose=True):
"""Simulate Brownian motion trajectories and emission rates.
This method performs the Brownian motion simulation using the current
set of parameters. Before running this method you can check the
disk-space requirements using :method:`print_sizes`.
Results are stored to disk in HDF5 format and are accessible
in `self.emission`, `self.emission_tot` and `self.position` as
pytables arrays.
Arguments:
save_pos (bool): if True, save the particles 3D trajectories
total_emission (bool): if True, store only the total emission array
containing the sum of emission of all the particles.
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
random state, otherwise is ignored.
wrap_func (function): the function used to apply the boundary
condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
path (string): a folder where simulation data is saved.
verbose (bool): if False, prints no output.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
self.open_store_traj(chunksize=chunksize, chunkslice=chunkslice,
radial=radial, path=path)
# Save current random state for reproducibility
self.traj_group._v_attrs['init_random_state'] = rs.get_state()
em_store = self.emission_tot if total_emission else self.emission
print('- Start trajectories simulation - %s' % ctime(), flush=True)
if verbose:
print('[PID %d] Diffusion time:' % os.getpid(), end='')
i_chunk = 0
t_chunk_size = self.emission.chunkshape[1]
chunk_duration = t_chunk_size * self.t_step
par_start_pos = self.particles.positions
prev_time = 0
for time_size in iter_chunksize(self.n_samples, t_chunk_size):
if verbose:
curr_time = int(chunk_duration * (i_chunk + 1))
if curr_time > prev_time:
print(' %ds' % curr_time, end='', flush=True)
prev_time = curr_time
POS, em = self._sim_trajectories(time_size, par_start_pos, rs,
total_emission=total_emission,
save_pos=save_pos, radial=radial,
wrap_func=wrap_func)
## Append em to the permanent storage
# if total_emission, data is just a linear array
# otherwise is a 2-D array (self.num_particles, c_size)
em_store.append(em)
if save_pos:
self.position.append(np.vstack(POS).astype('float32'))
i_chunk += 1
self.store.h5file.flush()
# Save current random state
self.traj_group._v_attrs['last_random_state'] = rs.get_state()
self.store.h5file.flush()
print('\n- End trajectories simulation - %s' % ctime(), flush=True) |
def version_variants(version):
"""Given an igraph version number, returns a list of possible version
number variants to try when looking for a suitable nightly build of the
C core to download from igraph.org."""
result = [version]
# Add trailing ".0" as needed to ensure that we have at least
# major.minor.patch
parts = version.split(".")
while len(parts) < 3:
parts.append("0")
result.append(".".join(parts))
return result | Given an igraph version number, returns a list of possible version
number variants to try when looking for a suitable nightly build of the
C core to download from igraph.org. | Below is the the instruction that describes the task:
### Input:
Given an igraph version number, returns a list of possible version
number variants to try when looking for a suitable nightly build of the
C core to download from igraph.org.
### Response:
def version_variants(version):
"""Given an igraph version number, returns a list of possible version
number variants to try when looking for a suitable nightly build of the
C core to download from igraph.org."""
result = [version]
# Add trailing ".0" as needed to ensure that we have at least
# major.minor.patch
parts = version.split(".")
while len(parts) < 3:
parts.append("0")
result.append(".".join(parts))
return result |
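Two illustrative calls:

    version_variants('0.10')     # -> ['0.10', '0.10.0']
    version_variants('0.10.4')   # -> ['0.10.4', '0.10.4']  (already major.minor.patch, so the variant repeats)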
def serialize(value, field):
"""
Form values serialization
:param object value: A value to be serialized\
for saving it into the database and later\
loading it into the form as initial value
"""
assert isinstance(field, forms.Field)
if isinstance(field, forms.ModelMultipleChoiceField):
return json.dumps([v.pk for v in value])
# todo: remove
if isinstance(value, models.Model):
return value.pk
return value | Form values serialization
:param object value: A value to be serialized\
for saving it into the database and later\
loading it into the form as initial value | Below is the the instruction that describes the task:
### Input:
Form values serialization
:param object value: A value to be serialized\
for saving it into the database and later\
loading it into the form as initial value
### Response:
def serialize(value, field):
"""
Form values serialization
:param object value: A value to be serialized\
for saving it into the database and later\
loading it into the form as initial value
"""
assert isinstance(field, forms.Field)
if isinstance(field, forms.ModelMultipleChoiceField):
return json.dumps([v.pk for v in value])
# todo: remove
if isinstance(value, models.Model):
return value.pk
return value |
def indent(elem, level=0):
"""
Helper function, adds indentation to XML output.
    :param elem: object of Element class, representing element to which method adds indentation,
    :param level: current level of indentation.
"""
i = "\n" + level * " "
j = "\n" + (level - 1) * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
BpmnDiagramGraphExport.indent(subelem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem | Helper function, adds indentation to XML output.
    :param elem: object of Element class, representing element to which method adds indentation,
    :param level: current level of indentation. | Below is the instruction that describes the task:
### Input:
Helper function, adds indentation to XML output.
    :param elem: object of Element class, representing element to which method adds indentation,
    :param level: current level of indentation.
### Response:
def indent(elem, level=0):
"""
Helper function, adds indentation to XML output.
    :param elem: object of Element class, representing element to which method adds indentation,
    :param level: current level of indentation.
"""
i = "\n" + level * " "
j = "\n" + (level - 1) * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
BpmnDiagramGraphExport.indent(subelem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem |
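A small sketch of the indent helper in action; the enclosing BpmnDiagramGraphExport class is assumed from the recursive call, and the element names are invented.
import xml.etree.ElementTree as ET
root = ET.Element("definitions")
ET.SubElement(ET.SubElement(root, "process"), "task")
BpmnDiagramGraphExport.indent(root)           # rewrites .text/.tail in place to add newlines and indentation
print(ET.tostring(root, encoding="unicode"))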
def get_bin_dir(self, build_module: str) -> str:
"""Return a path to the binaries dir for a build module dir.
Create sub-tree of missing dirs as needed, and return full path
to innermost directory.
"""
bin_dir = os.path.join(self.conf.get_bin_path(), build_module)
if not os.path.isdir(bin_dir):
# exist_ok=True in case of concurrent creation of the same dir
os.makedirs(bin_dir, exist_ok=True)
return bin_dir | Return a path to the binaries dir for a build module dir.
Create sub-tree of missing dirs as needed, and return full path
    to innermost directory. | Below is the instruction that describes the task:
### Input:
Return a path to the binaries dir for a build module dir.
Create sub-tree of missing dirs as needed, and return full path
to innermost directory.
### Response:
def get_bin_dir(self, build_module: str) -> str:
"""Return a path to the binaries dir for a build module dir.
Create sub-tree of missing dirs as needed, and return full path
to innermost directory.
"""
bin_dir = os.path.join(self.conf.get_bin_path(), build_module)
if not os.path.isdir(bin_dir):
# exist_ok=True in case of concurrent creation of the same dir
os.makedirs(bin_dir, exist_ok=True)
return bin_dir |
def find_price_by_category(package, price_category):
"""Find the price in the given package that has the specified category
:param package: The AsAService, Enterprise, or Performance product package
:param price_category: The price category code to search for
:return: Returns the price for the given category, or an error if not found
"""
for item in package['items']:
price_id = _find_price_id(item['prices'], price_category)
if price_id:
return price_id
raise ValueError("Could not find price with the category, %s" % price_category) | Find the price in the given package that has the specified category
:param package: The AsAService, Enterprise, or Performance product package
:param price_category: The price category code to search for
    :return: Returns the price for the given category, or an error if not found | Below is the instruction that describes the task:
### Input:
Find the price in the given package that has the specified category
:param package: The AsAService, Enterprise, or Performance product package
:param price_category: The price category code to search for
:return: Returns the price for the given category, or an error if not found
### Response:
def find_price_by_category(package, price_category):
"""Find the price in the given package that has the specified category
:param package: The AsAService, Enterprise, or Performance product package
:param price_category: The price category code to search for
:return: Returns the price for the given category, or an error if not found
"""
for item in package['items']:
price_id = _find_price_id(item['prices'], price_category)
if price_id:
return price_id
raise ValueError("Could not find price with the category, %s" % price_category) |
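An illustrative call with a made-up package payload; the exact price structure and the behaviour of _find_price_id are assumptions here, not taken from the source.
# Hypothetical package fragment, for illustration only.
package = {"items": [{"prices": [{"id": 12345, "categories": [{"categoryCode": "performance_storage_space"}]}]}]}
price = find_price_by_category(package, "performance_storage_space")  # expected to return whatever _find_price_id yields for the match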
def id(self):
"""
:return: the ID attribute of the sentence
:rtype: int
"""
if self._id is None:
self._id = int(self._element.get('id'))
return self._id | :return: the ID attribute of the sentence
        :rtype: int | Below is the instruction that describes the task:
### Input:
:return: the ID attribute of the sentence
:rtype: int
### Response:
def id(self):
"""
:return: the ID attribute of the sentence
:rtype: int
"""
if self._id is None:
self._id = int(self._element.get('id'))
return self._id |
def v_reference_leaf_leafref(ctx, stmt):
"""Verify that all leafrefs in a leaf or leaf-list have correct path"""
if (hasattr(stmt, 'i_leafref') and
stmt.i_leafref is not None and
stmt.i_leafref_expanded is False):
path_type_spec = stmt.i_leafref
not_req_inst = not(path_type_spec.require_instance)
x = validate_leafref_path(ctx, stmt,
path_type_spec.path_spec,
path_type_spec.path_,
accept_non_config_target=not_req_inst
)
if x is None:
return
ptr, expanded_path, path_list = x
path_type_spec.i_target_node = ptr
path_type_spec.i_expanded_path = expanded_path
path_type_spec.i_path_list = path_list
stmt.i_leafref_expanded = True
if ptr is not None:
chk_status(ctx, stmt, ptr)
            stmt.i_leafref_ptr = (ptr, path_type_spec.pos) | Verify that all leafrefs in a leaf or leaf-list have correct path | Below is the instruction that describes the task:
### Input:
Verify that all leafrefs in a leaf or leaf-list have correct path
### Response:
def v_reference_leaf_leafref(ctx, stmt):
"""Verify that all leafrefs in a leaf or leaf-list have correct path"""
if (hasattr(stmt, 'i_leafref') and
stmt.i_leafref is not None and
stmt.i_leafref_expanded is False):
path_type_spec = stmt.i_leafref
not_req_inst = not(path_type_spec.require_instance)
x = validate_leafref_path(ctx, stmt,
path_type_spec.path_spec,
path_type_spec.path_,
accept_non_config_target=not_req_inst
)
if x is None:
return
ptr, expanded_path, path_list = x
path_type_spec.i_target_node = ptr
path_type_spec.i_expanded_path = expanded_path
path_type_spec.i_path_list = path_list
stmt.i_leafref_expanded = True
if ptr is not None:
chk_status(ctx, stmt, ptr)
stmt.i_leafref_ptr = (ptr, path_type_spec.pos) |
def atlas_peer_has_fresh_zonefile_inventory( peer_hostport, peer_table=None ):
"""
Does the given atlas node have a fresh zonefile inventory?
"""
fresh = False
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
now = time_now()
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# NOTE: zero-length or None peer inventory means the peer is simply dead, but we've pinged it
if ptbl[peer_hostport].has_key('zonefile_inventory_last_refresh') and \
ptbl[peer_hostport]['zonefile_inventory_last_refresh'] + atlas_peer_ping_interval() > now:
fresh = True
    return fresh | Does the given atlas node have a fresh zonefile inventory? | Below is the instruction that describes the task:
### Input:
Does the given atlas node have a fresh zonefile inventory?
### Response:
def atlas_peer_has_fresh_zonefile_inventory( peer_hostport, peer_table=None ):
"""
Does the given atlas node have a fresh zonefile inventory?
"""
fresh = False
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
now = time_now()
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# NOTE: zero-length or None peer inventory means the peer is simply dead, but we've pinged it
if ptbl[peer_hostport].has_key('zonefile_inventory_last_refresh') and \
ptbl[peer_hostport]['zonefile_inventory_last_refresh'] + atlas_peer_ping_interval() > now:
fresh = True
return fresh |
def _validate_authority_uri_abs_path(host, path):
"""Ensure that path in URL with authority starts with a leading slash.
Raise ValueError if not.
"""
if len(host) > 0 and len(path) > 0 and not path.startswith("/"):
raise ValueError(
"Path in a URL with authority " "should start with a slash ('/') if set"
) | Ensure that path in URL with authority starts with a leading slash.
    Raise ValueError if not. | Below is the instruction that describes the task:
### Input:
Ensure that path in URL with authority starts with a leading slash.
Raise ValueError if not.
### Response:
def _validate_authority_uri_abs_path(host, path):
"""Ensure that path in URL with authority starts with a leading slash.
Raise ValueError if not.
"""
if len(host) > 0 and len(path) > 0 and not path.startswith("/"):
raise ValueError(
"Path in a URL with authority " "should start with a slash ('/') if set"
) |
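A quick sketch of the guard's behaviour; the host and path values are hypothetical.
_validate_authority_uri_abs_path(host="example.com", path="/index")  # passes silently
_validate_authority_uri_abs_path(host="", path="index")              # passes: no authority present
_validate_authority_uri_abs_path(host="example.com", path="index")   # raises ValueError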
def compare_parts(list1, list2):
"""
if list2 does not start with list1, we can't really check and return 0
"""
for i, item in enumerate(list1):
if item != list2[i]:
return 0
if len(list2) > len(list1):
return ISDIR
else:
        return ISFILE | if list2 does not start with list1, we can't really check and return 0 | Below is the instruction that describes the task:
### Input:
if list2 does not start with list1, we can't really check and return 0
### Response:
def compare_parts(list1, list2):
"""
if list2 does not start with list1, we can't really check and return 0
"""
for i, item in enumerate(list1):
if item != list2[i]:
return 0
if len(list2) > len(list1):
return ISDIR
else:
return ISFILE |
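How compare_parts resolves the three cases, assuming ISDIR and ISFILE are the module constants referenced above; the path segments are invented.
compare_parts(["usr", "lib"], ["usr", "lib"])             # -> ISFILE (exact match)
compare_parts(["usr", "lib"], ["usr", "lib", "python3"])  # -> ISDIR  (list2 extends list1)
compare_parts(["usr", "lib"], ["usr", "local", "bin"])    # -> 0      (prefix mismatch)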
def cmdify(self):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
return " ".join(
itertools.chain(
[_quote_if_contains(self.command, r"[\s^()]")],
(_quote_if_contains(arg, r"[\s^]") for arg in self.args),
)
) | Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
    See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence | Below is the instruction that describes the task:
### Input:
Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
### Response:
def cmdify(self):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
return " ".join(
itertools.chain(
[_quote_if_contains(self.command, r"[\s^()]")],
(_quote_if_contains(arg, r"[\s^]") for arg in self.args),
)
) |
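A hedged illustration of the quoting rule; the Script wrapper class and the _quote_if_contains helper are assumed from context rather than shown in the source, so the exact output is indicative only.
# Only pieces containing whitespace (or '^', and '(' ')' for the command) are expected to be quoted.
script = Script("echo", ["hello world", "/b"])   # hypothetical constructor
print(script.cmdify())                           # expected roughly: echo "hello world" /b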
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data) | Write the output data in resources returned by exporter
    to files. | Below is the instruction that describes the task:
### Input:
Write the output data in resources returned by exporter
to files.
### Response:
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data) |
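A hedged sketch of the expected resources shape; the writer instance and the byte payload are invented for illustration.
writer.write_resources({"outputs": {"figures/plot.png": b"<binary image bytes>"}})
# -> writes <output_dir>/figures/plot.png, creating the figures/ subdirectory first if needed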
def dbmax10years(self, value=None):
""" Corresponds to IDD Field `dbmax10years`
10-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbmax10years`'.format(value))
self._dbmax10years = value | Corresponds to IDD Field `dbmax10years`
10-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
            ValueError: if `value` is not a valid value | Below is the instruction that describes the task:
### Input:
Corresponds to IDD Field `dbmax10years`
10-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def dbmax10years(self, value=None):
""" Corresponds to IDD Field `dbmax10years`
10-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbmax10years`'.format(value))
self._dbmax10years = value |
def get_complex_coefficients(self, params):
"""
Get the arrays ``alpha_complex_*`` and ``beta_complex_*``
This method should be overloaded by subclasses to return the arrays
``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``,
and ``beta_complex_imag`` given the current parameter settings. By
default, this term is empty.
Returns:
(array[j_complex], array[j_complex], array[j_complex],
array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above. ``alpha_complex_imag`` can be omitted and it will be
assumed to be zero.
"""
return np.empty(0), np.empty(0), np.empty(0), np.empty(0) | Get the arrays ``alpha_complex_*`` and ``beta_complex_*``
This method should be overloaded by subclasses to return the arrays
``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``,
and ``beta_complex_imag`` given the current parameter settings. By
default, this term is empty.
Returns:
(array[j_complex], array[j_complex], array[j_complex],
array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above. ``alpha_complex_imag`` can be omitted and it will be
            assumed to be zero. | Below is the instruction that describes the task:
### Input:
Get the arrays ``alpha_complex_*`` and ``beta_complex_*``
This method should be overloaded by subclasses to return the arrays
``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``,
and ``beta_complex_imag`` given the current parameter settings. By
default, this term is empty.
Returns:
(array[j_complex], array[j_complex], array[j_complex],
array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above. ``alpha_complex_imag`` can be omitted and it will be
assumed to be zero.
### Response:
def get_complex_coefficients(self, params):
"""
Get the arrays ``alpha_complex_*`` and ``beta_complex_*``
This method should be overloaded by subclasses to return the arrays
``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``,
and ``beta_complex_imag`` given the current parameter settings. By
default, this term is empty.
Returns:
(array[j_complex], array[j_complex], array[j_complex],
array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above. ``alpha_complex_imag`` can be omitted and it will be
assumed to be zero.
"""
return np.empty(0), np.empty(0), np.empty(0), np.empty(0) |
def splits(cls, text_field, label_field, root='.data',
train='train.txt', validation='dev.txt', test='test.txt',
train_subtrees=False, **kwargs):
"""Create dataset objects for splits of the SST dataset.
Arguments:
text_field: The field that will be used for the sentence.
label_field: The field that will be used for label data.
root: The root directory that the dataset's zip archive will be
expanded into; therefore the directory in whose trees
subdirectory the data files will be stored.
train: The filename of the train data. Default: 'train.txt'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'dev.txt'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.txt'.
train_subtrees: Whether to use all subtrees in the training set.
Default: False.
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
path = cls.download(root)
train_data = None if train is None else cls(
os.path.join(path, train), text_field, label_field, subtrees=train_subtrees,
**kwargs)
val_data = None if validation is None else cls(
os.path.join(path, validation), text_field, label_field, **kwargs)
test_data = None if test is None else cls(
os.path.join(path, test), text_field, label_field, **kwargs)
return tuple(d for d in (train_data, val_data, test_data)
if d is not None) | Create dataset objects for splits of the SST dataset.
Arguments:
text_field: The field that will be used for the sentence.
label_field: The field that will be used for label data.
root: The root directory that the dataset's zip archive will be
expanded into; therefore the directory in whose trees
subdirectory the data files will be stored.
train: The filename of the train data. Default: 'train.txt'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'dev.txt'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.txt'.
train_subtrees: Whether to use all subtrees in the training set.
Default: False.
Remaining keyword arguments: Passed to the splits method of
                Dataset. | Below is the instruction that describes the task:
### Input:
Create dataset objects for splits of the SST dataset.
Arguments:
text_field: The field that will be used for the sentence.
label_field: The field that will be used for label data.
root: The root directory that the dataset's zip archive will be
expanded into; therefore the directory in whose trees
subdirectory the data files will be stored.
train: The filename of the train data. Default: 'train.txt'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'dev.txt'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.txt'.
train_subtrees: Whether to use all subtrees in the training set.
Default: False.
Remaining keyword arguments: Passed to the splits method of
Dataset.
### Response:
def splits(cls, text_field, label_field, root='.data',
train='train.txt', validation='dev.txt', test='test.txt',
train_subtrees=False, **kwargs):
"""Create dataset objects for splits of the SST dataset.
Arguments:
text_field: The field that will be used for the sentence.
label_field: The field that will be used for label data.
root: The root directory that the dataset's zip archive will be
expanded into; therefore the directory in whose trees
subdirectory the data files will be stored.
train: The filename of the train data. Default: 'train.txt'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'dev.txt'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.txt'.
train_subtrees: Whether to use all subtrees in the training set.
Default: False.
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
path = cls.download(root)
train_data = None if train is None else cls(
os.path.join(path, train), text_field, label_field, subtrees=train_subtrees,
**kwargs)
val_data = None if validation is None else cls(
os.path.join(path, validation), text_field, label_field, **kwargs)
test_data = None if test is None else cls(
os.path.join(path, test), text_field, label_field, **kwargs)
return tuple(d for d in (train_data, val_data, test_data)
if d is not None) |
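A typical torchtext-style call pattern for the splits classmethod above; the field construction and import are a sketch, not taken from the source.
from torchtext import data  # assumed import for the legacy Field API
TEXT = data.Field(lower=True)
LABEL = data.Field(sequential=False)
train, val, test = SST.splits(TEXT, LABEL, train_subtrees=True)
print(len(train), len(val), len(test))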
def resolve_import_alias(name, import_names):
"""Resolve a name from an aliased import to its original name.
:param name: The potentially aliased name to resolve.
:type name: str
:param import_names: The pairs of original names and aliases
from the import.
:type import_names: iterable(tuple(str, str or None))
:returns: The original name.
:rtype: str
"""
resolved_name = name
for import_name, imported_as in import_names:
if import_name == name:
break
if imported_as == name:
resolved_name = import_name
break
return resolved_name | Resolve a name from an aliased import to its original name.
:param name: The potentially aliased name to resolve.
:type name: str
:param import_names: The pairs of original names and aliases
from the import.
:type import_names: iterable(tuple(str, str or None))
:returns: The original name.
    :rtype: str | Below is the instruction that describes the task:
### Input:
Resolve a name from an aliased import to its original name.
:param name: The potentially aliased name to resolve.
:type name: str
:param import_names: The pairs of original names and aliases
from the import.
:type import_names: iterable(tuple(str, str or None))
:returns: The original name.
:rtype: str
### Response:
def resolve_import_alias(name, import_names):
"""Resolve a name from an aliased import to its original name.
:param name: The potentially aliased name to resolve.
:type name: str
:param import_names: The pairs of original names and aliases
from the import.
:type import_names: iterable(tuple(str, str or None))
:returns: The original name.
:rtype: str
"""
resolved_name = name
for import_name, imported_as in import_names:
if import_name == name:
break
if imported_as == name:
resolved_name = import_name
break
return resolved_name |
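Two quick resolutions showing both branches of the loop; the import pairs are invented.
resolve_import_alias("np", [("numpy", "np"), ("os", None)])   # -> "numpy" (alias matched)
resolve_import_alias("os", [("numpy", "np"), ("os", None)])   # -> "os"    (original name matched)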
def hourly_horizontal_infrared(self):
"""A data collection containing hourly horizontal infrared intensity in W/m2.
"""
sky_cover = self._sky_condition.hourly_sky_cover
db_temp = self._dry_bulb_condition.hourly_values
dp_temp = self._humidity_condition.hourly_dew_point_values(
self._dry_bulb_condition)
horiz_ir = []
for i in xrange(len(sky_cover)):
horiz_ir.append(
calc_horizontal_infrared(sky_cover[i], db_temp[i], dp_temp[i]))
return self._get_daily_data_collections(
            energyflux.HorizontalInfraredRadiationIntensity(), 'W/m2', horiz_ir) | A data collection containing hourly horizontal infrared intensity in W/m2. | Below is the instruction that describes the task:
### Input:
A data collection containing hourly horizontal infrared intensity in W/m2.
### Response:
def hourly_horizontal_infrared(self):
"""A data collection containing hourly horizontal infrared intensity in W/m2.
"""
sky_cover = self._sky_condition.hourly_sky_cover
db_temp = self._dry_bulb_condition.hourly_values
dp_temp = self._humidity_condition.hourly_dew_point_values(
self._dry_bulb_condition)
horiz_ir = []
for i in xrange(len(sky_cover)):
horiz_ir.append(
calc_horizontal_infrared(sky_cover[i], db_temp[i], dp_temp[i]))
return self._get_daily_data_collections(
energyflux.HorizontalInfraredRadiationIntensity(), 'W/m2', horiz_ir) |
def _finish_selecting(self, event):
"""Finaliza la seleccion.
Marca como seleccionados todos los objetos que se encuentran
dentro del recuadro de seleccion."""
self._selecting = False
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
canvas.coords(self._sobject, -1, -1, -1, -1)
canvas.itemconfigure(self._sobject, state=tk.HIDDEN)
sel_region = self._sstart[0], self._sstart[1], x, y
canvas.region_selected = sel_region
        canvas.event_generate('<<RegionSelected>>') | Finish the selection.
        Mark as selected all the objects that lie inside
        the selection rectangle. | Below is the instruction that describes the task:
### Input:
Finish the selection.
        Mark as selected all the objects that lie inside
        the selection rectangle.
### Response:
def _finish_selecting(self, event):
"""Finaliza la seleccion.
Marca como seleccionados todos los objetos que se encuentran
dentro del recuadro de seleccion."""
self._selecting = False
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
canvas.coords(self._sobject, -1, -1, -1, -1)
canvas.itemconfigure(self._sobject, state=tk.HIDDEN)
sel_region = self._sstart[0], self._sstart[1], x, y
canvas.region_selected = sel_region
canvas.event_generate('<<RegionSelected>>') |
def getSrcBlocks(self, url, dataset="", block=""):
"""
Need to list all blocks of the dataset and its parents starting from the top
For now just list the blocks from this dataset.
Client type call...
"""
if block:
params={'block_name':block, 'open_for_writing':0}
elif dataset:
params={'dataset':dataset, 'open_for_writing':0}
else:
m = 'DBSMigration: Invalid input. Either block or dataset name has to be provided'
e = 'DBSMigrate/getSrcBlocks: Invalid input. Either block or dataset name has to be provided'
dbsExceptionHandler('dbsException-invalid-input2', m, self.logger.exception, e )
return cjson.decode(self.callDBSService(url, 'blocks', params, {})) | Need to list all blocks of the dataset and its parents starting from the top
For now just list the blocks from this dataset.
        Client type call... | Below is the instruction that describes the task:
### Input:
Need to list all blocks of the dataset and its parents starting from the top
For now just list the blocks from this dataset.
Client type call...
### Response:
def getSrcBlocks(self, url, dataset="", block=""):
"""
Need to list all blocks of the dataset and its parents starting from the top
For now just list the blocks from this dataset.
Client type call...
"""
if block:
params={'block_name':block, 'open_for_writing':0}
elif dataset:
params={'dataset':dataset, 'open_for_writing':0}
else:
m = 'DBSMigration: Invalid input. Either block or dataset name has to be provided'
e = 'DBSMigrate/getSrcBlocks: Invalid input. Either block or dataset name has to be provided'
dbsExceptionHandler('dbsException-invalid-input2', m, self.logger.exception, e )
return cjson.decode(self.callDBSService(url, 'blocks', params, {})) |
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect(autocommit=True)
cursor = connection.cursor()
try:
cursor.execute(
""" CREATE TABLE {marker_table} (
id BIGINT(20) NOT NULL AUTO_INCREMENT,
update_id VARCHAR(128) NOT NULL,
target_table VARCHAR(128),
inserted TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (update_id),
KEY id (id)
)
"""
.format(marker_table=self.marker_table)
)
except mysql.connector.Error as e:
if e.errno == errorcode.ER_TABLE_EXISTS_ERROR:
pass
else:
raise
connection.close() | Create marker table if it doesn't exist.
        Using a separate connection since the transaction might have to be reset. | Below is the instruction that describes the task:
### Input:
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
### Response:
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect(autocommit=True)
cursor = connection.cursor()
try:
cursor.execute(
""" CREATE TABLE {marker_table} (
id BIGINT(20) NOT NULL AUTO_INCREMENT,
update_id VARCHAR(128) NOT NULL,
target_table VARCHAR(128),
inserted TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (update_id),
KEY id (id)
)
"""
.format(marker_table=self.marker_table)
)
except mysql.connector.Error as e:
if e.errno == errorcode.ER_TABLE_EXISTS_ERROR:
pass
else:
raise
connection.close() |
def pull(self):
"""
Pull selected repo from a remote git repository,
while preserving user changes
"""
if not os.path.exists(self.repo_dir):
yield from self.initialize_repo()
else:
yield from self.update() | Pull selected repo from a remote git repository,
        while preserving user changes | Below is the instruction that describes the task:
### Input:
Pull selected repo from a remote git repository,
while preserving user changes
### Response:
def pull(self):
"""
Pull selected repo from a remote git repository,
while preserving user changes
"""
if not os.path.exists(self.repo_dir):
yield from self.initialize_repo()
else:
yield from self.update() |
def ConvertCloudMetadataResponsesToCloudInstance(metadata_responses):
"""Convert CloudMetadataResponses to CloudInstance proto.
Ideally we'd just get the client to fill out a CloudInstance proto, but we
need to keep the flexibility of collecting new metadata and creating new
fields without a client push. So instead we bring back essentially a dict of
results and fill the proto on the server side.
Args:
metadata_responses: CloudMetadataResponses object from the client.
Returns:
CloudInstance object
Raises:
ValueError: if client passes bad or unset cloud type.
"""
if metadata_responses.instance_type == "GOOGLE":
cloud_instance = GoogleCloudInstance()
result = CloudInstance(cloud_type="GOOGLE", google=cloud_instance)
elif metadata_responses.instance_type == "AMAZON":
cloud_instance = AmazonCloudInstance()
result = CloudInstance(cloud_type="AMAZON", amazon=cloud_instance)
else:
raise ValueError(
"Unknown cloud instance type: %s" % metadata_responses.instance_type)
for cloud_metadata in metadata_responses.responses:
setattr(cloud_instance, cloud_metadata.label, cloud_metadata.text)
if result.cloud_type == "GOOGLE":
cloud_instance.unique_id = MakeGoogleUniqueID(cloud_instance)
return result | Convert CloudMetadataResponses to CloudInstance proto.
Ideally we'd just get the client to fill out a CloudInstance proto, but we
need to keep the flexibility of collecting new metadata and creating new
fields without a client push. So instead we bring back essentially a dict of
results and fill the proto on the server side.
Args:
metadata_responses: CloudMetadataResponses object from the client.
Returns:
CloudInstance object
Raises:
    ValueError: if client passes bad or unset cloud type. | Below is the instruction that describes the task:
### Input:
Convert CloudMetadataResponses to CloudInstance proto.
Ideally we'd just get the client to fill out a CloudInstance proto, but we
need to keep the flexibility of collecting new metadata and creating new
fields without a client push. So instead we bring back essentially a dict of
results and fill the proto on the server side.
Args:
metadata_responses: CloudMetadataResponses object from the client.
Returns:
CloudInstance object
Raises:
ValueError: if client passes bad or unset cloud type.
### Response:
def ConvertCloudMetadataResponsesToCloudInstance(metadata_responses):
"""Convert CloudMetadataResponses to CloudInstance proto.
Ideally we'd just get the client to fill out a CloudInstance proto, but we
need to keep the flexibility of collecting new metadata and creating new
fields without a client push. So instead we bring back essentially a dict of
results and fill the proto on the server side.
Args:
metadata_responses: CloudMetadataResponses object from the client.
Returns:
CloudInstance object
Raises:
ValueError: if client passes bad or unset cloud type.
"""
if metadata_responses.instance_type == "GOOGLE":
cloud_instance = GoogleCloudInstance()
result = CloudInstance(cloud_type="GOOGLE", google=cloud_instance)
elif metadata_responses.instance_type == "AMAZON":
cloud_instance = AmazonCloudInstance()
result = CloudInstance(cloud_type="AMAZON", amazon=cloud_instance)
else:
raise ValueError(
"Unknown cloud instance type: %s" % metadata_responses.instance_type)
for cloud_metadata in metadata_responses.responses:
setattr(cloud_instance, cloud_metadata.label, cloud_metadata.text)
if result.cloud_type == "GOOGLE":
cloud_instance.unique_id = MakeGoogleUniqueID(cloud_instance)
return result |
def validate(method, auth, *args, **kwargs):
"""Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
:param **kwargs: Keyword arguments for method.
"""
if method not in METHOD_RULES:
raise PyCronofyValidationError('Method "%s" not found.' % method, method)
m = METHOD_RULES[method]
arguments = {}
number_of_args = len(args)
for i, key in enumerate(m['args']):
if i < number_of_args:
arguments[key] = args[i]
elif key in kwargs:
arguments[key] = kwargs[key]
else:
arguments[key] = None
check_exists_in_object(method, auth, m['auth'])
if 'required' in m:
check_exists_in_dictionary(method, arguments, m['required'])
if 'datetime' in m:
check_datetime(method, arguments, m['datetime'])
if 'dicts' in m:
for d in m['dicts']:
check_exists_in_dictionary(method, arguments[d], m['dicts'][d], d)
if 'dicts_datetime' in m:
for d in m['dicts_datetime']:
check_datetime(method, arguments[d], m['dicts_datetime'][d], d) | Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
    :param **kwargs: Keyword arguments for method. | Below is the instruction that describes the task:
### Input:
Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
:param **kwargs: Keyword arguments for method.
### Response:
def validate(method, auth, *args, **kwargs):
"""Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
:param **kwargs: Keyword arguments for method.
"""
if method not in METHOD_RULES:
raise PyCronofyValidationError('Method "%s" not found.' % method, method)
m = METHOD_RULES[method]
arguments = {}
number_of_args = len(args)
for i, key in enumerate(m['args']):
if i < number_of_args:
arguments[key] = args[i]
elif key in kwargs:
arguments[key] = kwargs[key]
else:
arguments[key] = None
check_exists_in_object(method, auth, m['auth'])
if 'required' in m:
check_exists_in_dictionary(method, arguments, m['required'])
if 'datetime' in m:
check_datetime(method, arguments, m['datetime'])
if 'dicts' in m:
for d in m['dicts']:
check_exists_in_dictionary(method, arguments[d], m['dicts'][d], d)
if 'dicts_datetime' in m:
for d in m['dicts_datetime']:
check_datetime(method, arguments[d], m['dicts_datetime'][d], d) |
def _get_ncc(self, width, ratio):
''' Get the number of complete chars.
This one figures the remainder for the partial char as well.
'''
sub_chars = round(width * ratio * self.partial_chars_len)
ncc, self.remainder = divmod(sub_chars, self.partial_chars_len)
return ncc | Get the number of complete chars.
            This one figures the remainder for the partial char as well. | Below is the instruction that describes the task:
### Input:
Get the number of complete chars.
This one figures the remainder for the partial char as well.
### Response:
def _get_ncc(self, width, ratio):
''' Get the number of complete chars.
This one figures the remainder for the partial char as well.
'''
sub_chars = round(width * ratio * self.partial_chars_len)
ncc, self.remainder = divmod(sub_chars, self.partial_chars_len)
return ncc |