code | docstring | text
---|---|---|
string (75 to 104k chars) | string (1 to 46.9k chars) | string (164 to 112k chars)
def _check_realign(data):
"""Check for realignment, which is not supported in GATK4
"""
if "gatk4" not in data["algorithm"].get("tools_off", []) and not "gatk4" == data["algorithm"].get("tools_off"):
if data["algorithm"].get("realign"):
raise ValueError("In sample %s, realign specified but it is not supported for GATK4. "
"Realignment is generally not necessary for most variant callers." %
(dd.get_sample_name(data))) | Check for realignment, which is not supported in GATK4 | Below is the instruction that describes the task:
### Input:
Check for realignment, which is not supported in GATK4
### Response:
def _check_realign(data):
"""Check for realignment, which is not supported in GATK4
"""
if "gatk4" not in data["algorithm"].get("tools_off", []) and not "gatk4" == data["algorithm"].get("tools_off"):
if data["algorithm"].get("realign"):
raise ValueError("In sample %s, realign specified but it is not supported for GATK4. "
"Realignment is generally not necessary for most variant callers." %
(dd.get_sample_name(data))) |
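A rough sketch of the configuration shape this check inspects (assumed from the lookups in the function; a real bcbio sample dict also carries the naming keys that dd.get_sample_name reads):
# GATK4 left enabled (not listed in tools_off) while realignment is requested:
data = {"algorithm": {"tools_off": [], "realign": True}}
# A sample shaped like this reaches the ValueError branch above; adding "gatk4"
# to tools_off (or dropping "realign") avoids it.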
def http_method_not_allowed(self, *args, **kwargs):
"""
Returns super after setting the Content-Type header to
``application/json``
"""
resp = super(JsonResponseMixin, self).http_method_not_allowed(*args, **kwargs)
resp['Content-Type'] = 'application/json'
return resp | Returns super after setting the Content-Type header to
``application/json`` | Below is the instruction that describes the task:
### Input:
Returns super after setting the Content-Type header to
``application/json``
### Response:
def http_method_not_allowed(self, *args, **kwargs):
"""
Returns super after setting the Content-Type header to
``application/json``
"""
resp = super(JsonResponseMixin, self).http_method_not_allowed(*args, **kwargs)
resp['Content-Type'] = 'application/json'
return resp |
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape | Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`. | Below is the instruction that describes the task:
### Input:
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
### Response:
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape |
def _save(self, data):
"""
Take the data from a dict and commit them to appropriate attributes.
"""
self.state = data.get('state')
self.created = dt_time(data.get('created'))
self.updated = dt_time(data.get('updated')) | Take the data from a dict and commit them to appropriate attributes. | Below is the instruction that describes the task:
### Input:
Take the data from a dict and commit them to appropriate attributes.
### Response:
def _save(self, data):
"""
Take the data from a dict and commit them to appropriate attributes.
"""
self.state = data.get('state')
self.created = dt_time(data.get('created'))
self.updated = dt_time(data.get('updated')) |
def _add_state_callback(self, state_id, state):
""" Save summarize(state) on policy shared context before
the state is stored
"""
summary = self.summarize(state)
if summary is None:
return
with self.locked_context('summaries', dict) as ctx:
ctx[state_id] = summary | Save summarize(state) on policy shared context before
the state is stored | Below is the instruction that describes the task:
### Input:
Save summarize(state) on policy shared context before
the state is stored
### Response:
def _add_state_callback(self, state_id, state):
""" Save summarize(state) on policy shared context before
the state is stored
"""
summary = self.summarize(state)
if summary is None:
return
with self.locked_context('summaries', dict) as ctx:
ctx[state_id] = summary |
def initialize(state):
"""
Synchronize the stack and register state (manticore->qemu)
"""
logger.debug(f"Copying {stack_top - state.cpu.SP} bytes in the stack..")
stack_bottom = min(state.cpu.SP, gdb.getR('SP'))
for address in range(stack_bottom, stack_top):
b = state.cpu.read_int(address, 8)
gdb.setByte(address, chr(b))
logger.debug("Done")
# Qemu fd's start at 5, ours at 3. Add two filler fds
mcore_stdout = state.platform.files[1]
state.platform.files.append(mcore_stdout)
state.platform.files.append(mcore_stdout)
# Sync gdb's regs
for gdb_reg in gdb.getCanonicalRegisters():
if gdb_reg.endswith('psr'):
mcore_reg = 'APSR'
else:
mcore_reg = gdb_reg.upper()
value = state.cpu.read_register(mcore_reg)
gdb.setR(gdb_reg, value) | Synchronize the stack and register state (manticore->qemu) | Below is the instruction that describes the task:
### Input:
Synchronize the stack and register state (manticore->qemu)
### Response:
def initialize(state):
"""
Synchronize the stack and register state (manticore->qemu)
"""
logger.debug(f"Copying {stack_top - state.cpu.SP} bytes in the stack..")
stack_bottom = min(state.cpu.SP, gdb.getR('SP'))
for address in range(stack_bottom, stack_top):
b = state.cpu.read_int(address, 8)
gdb.setByte(address, chr(b))
logger.debug("Done")
# Qemu fd's start at 5, ours at 3. Add two filler fds
mcore_stdout = state.platform.files[1]
state.platform.files.append(mcore_stdout)
state.platform.files.append(mcore_stdout)
# Sync gdb's regs
for gdb_reg in gdb.getCanonicalRegisters():
if gdb_reg.endswith('psr'):
mcore_reg = 'APSR'
else:
mcore_reg = gdb_reg.upper()
value = state.cpu.read_register(mcore_reg)
gdb.setR(gdb_reg, value) |
def format_sync_stats(self, cnt):
'''
Format stats of the sync output.
:param cnt:
:return:
'''
stats = salt.utils.odict.OrderedDict()
if cnt.get('retcode') == salt.defaults.exitcodes.EX_OK:
for line in cnt.get('stdout', '').split(os.linesep):
line = line.split(': ')
if len(line) == 2:
stats[line[0].lower().replace(' ', '_')] = line[1]
cnt['transfer'] = stats
del cnt['stdout']
# Remove empty
empty_sections = []
for section in cnt:
if not cnt[section] and section != 'retcode':
empty_sections.append(section)
for section in empty_sections:
del cnt[section]
return cnt | Format stats of the sync output.
:param cnt:
:return: | Below is the instruction that describes the task:
### Input:
Format stats of the sync output.
:param cnt:
:return:
### Response:
def format_sync_stats(self, cnt):
'''
Format stats of the sync output.
:param cnt:
:return:
'''
stats = salt.utils.odict.OrderedDict()
if cnt.get('retcode') == salt.defaults.exitcodes.EX_OK:
for line in cnt.get('stdout', '').split(os.linesep):
line = line.split(': ')
if len(line) == 2:
stats[line[0].lower().replace(' ', '_')] = line[1]
cnt['transfer'] = stats
del cnt['stdout']
# Remove empty
empty_sections = []
for section in cnt:
if not cnt[section] and section != 'retcode':
empty_sections.append(section)
for section in empty_sections:
del cnt[section]
return cnt |
def delete_user_by_email(self, id, email):
"""Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict.
"""
return self.client.delete(self._url(id) + '/users', params={'email': email}) | Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict. | Below is the instruction that describes the task:
### Input:
Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict.
### Response:
def delete_user_by_email(self, id, email):
"""Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict.
"""
return self.client.delete(self._url(id) + '/users', params={'email': email}) |
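For orientation, a hedged usage sketch with the Auth0 management SDK this method appears to belong to; the domain, token, and connection id below are placeholders:
from auth0.v3.management import Auth0  # assumed package layout

auth0 = Auth0('my-tenant.auth0.com', 'MGMT_API_TOKEN')
# Deletes the user with this email from the given database connection; returns an empty dict.
auth0.connections.delete_user_by_email('con_abc123', 'user@example.com')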
def get_formset(self, request, obj=None, **kwargs):
"""
Return a form, if the obj has a staffmember object, otherwise
return an empty form
"""
if obj is not None and self.model.objects.filter(user=obj).count():
return super(StaffMemberAdmin, self).get_formset(
request,
obj,
**kwargs
)
defaults = {
"exclude": None,
"extra": 0,
"max_num": 0,
}
return inlineformset_factory(self.parent_model, self.model, **defaults) | Return a form, if the obj has a staffmember object, otherwise
return an empty form | Below is the instruction that describes the task:
### Input:
Return a form, if the obj has a staffmember object, otherwise
return an empty form
### Response:
def get_formset(self, request, obj=None, **kwargs):
"""
Return a form, if the obj has a staffmember object, otherwise
return an empty form
"""
if obj is not None and self.model.objects.filter(user=obj).count():
return super(StaffMemberAdmin, self).get_formset(
request,
obj,
**kwargs
)
defaults = {
"exclude": None,
"extra": 0,
"max_num": 0,
}
return inlineformset_factory(self.parent_model, self.model, **defaults) |
def _parse_residue(self, residue):
"""
Extracts Residue Name, Number, Chain, Model, Atoms.
I/O: xml object <response> / dictionary
"""
# Filter Element Nodes
childs = [ child for child in residue.childNodes if child.nodeType == child.ELEMENT_NODE ]
# Parse info out
resi = int(childs[0].firstChild.data.strip())
resn = childs[1].firstChild.data.strip()
icode = childs[3].firstChild.data
chain = childs[4].firstChild.data.strip()
model = int(childs[5].firstChild.data.strip())
atoms = childs[6:]
# Output
return {'name': resn, 'number': resi,
'icode': icode, 'chain': chain,
'model': model, 'atoms': atoms} | Extracts Residue Name, Number, Chain, Model, Atoms.
I/O: xml object <response> / dictionary | Below is the instruction that describes the task:
### Input:
Extracts Residue Name, Number, Chain, Model, Atoms.
I/O: xml object <response> / dictionary
### Response:
def _parse_residue(self, residue):
"""
Extracts Residue Name, Number, Chain, Model, Atoms.
I/O: xml object <response> / dictionary
"""
# Filter Element Nodes
childs = [ child for child in residue.childNodes if child.nodeType == child.ELEMENT_NODE ]
# Parse info out
resi = int(childs[0].firstChild.data.strip())
resn = childs[1].firstChild.data.strip()
icode = childs[3].firstChild.data
chain = childs[4].firstChild.data.strip()
model = int(childs[5].firstChild.data.strip())
atoms = childs[6:]
# Output
return {'name': resn, 'number': resi,
'icode': icode, 'chain': chain,
'model': model, 'atoms': atoms} |
def update_client_grants(self, client_id, scope=[], authorities=[],
grant_types=[], redirect_uri=[], replace=False):
"""
Will extend the client with additional scopes or
authorities. Any existing scopes and authorities will be left
as is unless asked to replace entirely.
"""
self.assert_has_permission('clients.write')
client = self.get_client(client_id)
if not client:
raise ValueError("Must first create client: '%s'" % (client_id))
if replace:
changes = {
'client_id': client_id,
'scope': scope,
'authorities': authorities,
}
else:
changes = {'client_id': client_id}
if scope:
changes['scope'] = client['scope']
changes['scope'].extend(scope)
if authorities:
changes['authorities'] = client['authorities']
changes['authorities'].extend(authorities)
if grant_types:
if 'authorization_code' in grant_types and not redirect_uri:
logging.warning("A redirect_uri is required for authorization_code.")
changes['authorized_grant_types'] = client['authorized_grant_types']
changes['authorized_grant_types'].extend(grant_types)
if redirect_uri:
if 'redirect_uri' in client:
changes['redirect_uri'] = client['redirect_uri']
changes['redirect_uri'].extend(redirect_uri)
else:
changes['redirect_uri'] = redirect_uri
uri = self.uri + '/oauth/clients/' + client_id
headers = {
"pragma": "no-cache",
"Cache-Control": "no-cache",
"Content-Type": "application/json",
"Accepts": "application/json",
"Authorization": "Bearer " + self.get_token()
}
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + json.dumps(changes))
response = requests.put(uri, headers=headers, data=json.dumps(changes))
logging.debug("STATUS=" + str(response.status_code))
if response.status_code == 200:
return response
else:
logging.error(response.content)
response.raise_for_status() | Will extend the client with additional scopes or
authorities. Any existing scopes and authorities will be left
as is unless asked to replace entirely. | Below is the instruction that describes the task:
### Input:
Will extend the client with additional scopes or
authorities. Any existing scopes and authorities will be left
as is unless asked to replace entirely.
### Response:
def update_client_grants(self, client_id, scope=[], authorities=[],
grant_types=[], redirect_uri=[], replace=False):
"""
Will extend the client with additional scopes or
authorities. Any existing scopes and authorities will be left
as is unless asked to replace entirely.
"""
self.assert_has_permission('clients.write')
client = self.get_client(client_id)
if not client:
raise ValueError("Must first create client: '%s'" % (client_id))
if replace:
changes = {
'client_id': client_id,
'scope': scope,
'authorities': authorities,
}
else:
changes = {'client_id': client_id}
if scope:
changes['scope'] = client['scope']
changes['scope'].extend(scope)
if authorities:
changes['authorities'] = client['authorities']
changes['authorities'].extend(authorities)
if grant_types:
if 'authorization_code' in grant_types and not redirect_uri:
logging.warning("A redirect_uri is required for authorization_code.")
changes['authorized_grant_types'] = client['authorized_grant_types']
changes['authorized_grant_types'].extend(grant_types)
if redirect_uri:
if 'redirect_uri' in client:
changes['redirect_uri'] = client['redirect_uri']
changes['redirect_uri'].extend(redirect_uri)
else:
changes['redirect_uri'] = redirect_uri
uri = self.uri + '/oauth/clients/' + client_id
headers = {
"pragma": "no-cache",
"Cache-Control": "no-cache",
"Content-Type": "application/json",
"Accepts": "application/json",
"Authorization": "Bearer " + self.get_token()
}
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + json.dumps(changes))
response = requests.put(uri, headers=headers, data=json.dumps(changes))
logging.debug("STATUS=" + str(response.status_code))
if response.status_code == 200:
return response
else:
logging.error(response.content)
response.raise_for_status() |
def _compute_transitions(self, corpus, order=1):
""" Computes the transition probabilities of a corpus
Args:
corpus: the given corpus (a corpus_entry needs to be iterable)
order: the maximal Markov chain order
"""
self.transitions = defaultdict(lambda: defaultdict(int))
for corpus_entry in corpus:
tokens = self.tokenize(corpus_entry)
last_tokens = utils.prefilled_buffer(
self._start_symbol, length=self.order)
# count the occurrences of "present | past"
for token_value in chain(tokens, self._end_symbol):
for suffix in utils.get_suffixes(last_tokens):
self.transitions[suffix][token_value] += 1
last_tokens.append(token_value)
self._compute_relative_probs(self.transitions) | Computes the transition probabilities of a corpus
Args:
corpus: the given corpus (a corpus_entry needs to be iterable)
order: the maximal Markov chain order | Below is the instruction that describes the task:
### Input:
Computes the transition probabilities of a corpus
Args:
corpus: the given corpus (a corpus_entry needs to be iterable)
order: the maximal Markov chain order
### Response:
def _compute_transitions(self, corpus, order=1):
""" Computes the transition probabilities of a corpus
Args:
corpus: the given corpus (a corpus_entry needs to be iterable)
order: the maximal Markov chain order
"""
self.transitions = defaultdict(lambda: defaultdict(int))
for corpus_entry in corpus:
tokens = self.tokenize(corpus_entry)
last_tokens = utils.prefilled_buffer(
self._start_symbol, length=self.order)
# count the occurrences of "present | past"
for token_value in chain(tokens, self._end_symbol):
for suffix in utils.get_suffixes(last_tokens):
self.transitions[suffix][token_value] += 1
last_tokens.append(token_value)
self._compute_relative_probs(self.transitions) |
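As a standalone illustration of the counting idea (not this class's API), a first-order version of the same "present | past" tally over a toy corpus:
from collections import defaultdict

transitions = defaultdict(lambda: defaultdict(int))
for sentence in (["the", "cat", "sat"], ["the", "dog", "sat"]):
    prev = "^"                         # start symbol
    for token in sentence + ["$"]:     # end symbol
        transitions[(prev,)][token] += 1   # count occurrences of "present | past"
        prev = token
# transitions[("the",)] == {"cat": 1, "dog": 1}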
def mutate(self, field):
"""Mutate the given field, modifying it directly. This is not
intended to preserve the value of the field.
:field: The pfp.fields.Field instance that will receive the new value
"""
new_val = self.next_val(field)
field._pfp__set_value(new_val)
return field | Mutate the given field, modifying it directly. This is not
intended to preserve the value of the field.
:field: The pfp.fields.Field instance that will receive the new value | Below is the instruction that describes the task:
### Input:
Mutate the given field, modifying it directly. This is not
intended to preserve the value of the field.
:field: The pfp.fields.Field instance that will receive the new value
### Response:
def mutate(self, field):
"""Mutate the given field, modifying it directly. This is not
intended to preserve the value of the field.
:field: The pfp.fields.Field instance that will receive the new value
"""
new_val = self.next_val(field)
field._pfp__set_value(new_val)
return field |
def contour(self, level):
"""Get contour lines at the given level.
Parameters
----------
level : numbers.Number
The data level to calculate the contour lines for.
Returns
-------
:
The result of the :attr:`formatter` called on the contour at the
given `level`.
"""
if not isinstance(level, numbers.Number):
raise TypeError(
("'_level' must be of type 'numbers.Number' but is "
"'{:s}'").format(type(level)))
vertices = self._contour_generator.create_contour(level)
return self.formatter(level, vertices) | Get contour lines at the given level.
Parameters
----------
level : numbers.Number
The data level to calculate the contour lines for.
Returns
-------
:
The result of the :attr:`formatter` called on the contour at the
given `level`. | Below is the instruction that describes the task:
### Input:
Get contour lines at the given level.
Parameters
----------
level : numbers.Number
The data level to calculate the contour lines for.
Returns
-------
:
The result of the :attr:`formatter` called on the contour at the
given `level`.
### Response:
def contour(self, level):
"""Get contour lines at the given level.
Parameters
----------
level : numbers.Number
The data level to calculate the contour lines for.
Returns
-------
:
The result of the :attr:`formatter` called on the contour at the
given `level`.
"""
if not isinstance(level, numbers.Number):
raise TypeError(
("'_level' must be of type 'numbers.Number' but is "
"'{:s}'").format(type(level)))
vertices = self._contour_generator.create_contour(level)
return self.formatter(level, vertices) |
def get_colors(catalog):
"""
Pull colors from catalog
Parameters
----------
catalog: filename
"""
print("Get Colors")
a = pyfits.open(catalog)
data = a[1].data
a.close()
all_ids = data['LAMOST_ID_1']
all_ids = np.array([val.strip() for val in all_ids])
# G magnitude
gmag = data['gpmag']
gmag_err = data['e_gpmag']
# R magnitude
rmag = data['rpmag']
rmag_err = data['e_rpmag']
# I magnitude
imag = data['ipmag']
imag_err = data['e_ipmag']
# W1
W1 = data['W1mag']
W1_err = data['e_W1mag']
# W2
W2 = data['W2mag']
W2_err = data['e_W2mag']
# J magnitude
Jmag = data['Jmag']
Jmag_err = data['e_Jmag']
# H magnitude
Hmag = data['Hmag']
Hmag_err = data['e_Hmag']
# K magnitude
Kmag = data['Kmag']
Kmag_err = data['e_Kmag']
# Stack
mag = np.vstack((
gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1)) # 8, nobj
mag_err = np.vstack((
gmag_err, rmag_err, imag_err, Jmag_err,
Hmag_err, Kmag_err, W2_err, W1_err))
# Make g-r, r-i, i-J, etc
col = mag[:-1] - mag[1:]
col_ivar = 1/(mag_err[:-1]**2 + mag_err[1:]**2)
# There's something wrong with the i-band, I think..so the second color r-i
#bad = col[:,1] < 0.0
#col_ivar[bad] = 0.0
return all_ids, col, col_ivar | Pull colors from catalog
Parameters
----------
catalog: filename | Below is the instruction that describes the task:
### Input:
Pull colors from catalog
Parameters
----------
catalog: filename
### Response:
def get_colors(catalog):
"""
Pull colors from catalog
Parameters
----------
catalog: filename
"""
print("Get Colors")
a = pyfits.open(catalog)
data = a[1].data
a.close()
all_ids = data['LAMOST_ID_1']
all_ids = np.array([val.strip() for val in all_ids])
# G magnitude
gmag = data['gpmag']
gmag_err = data['e_gpmag']
# R magnitude
rmag = data['rpmag']
rmag_err = data['e_rpmag']
# I magnitude
imag = data['ipmag']
imag_err = data['e_ipmag']
# W1
W1 = data['W1mag']
W1_err = data['e_W1mag']
# W2
W2 = data['W2mag']
W2_err = data['e_W2mag']
# J magnitude
Jmag = data['Jmag']
Jmag_err = data['e_Jmag']
# H magnitude
Hmag = data['Hmag']
Hmag_err = data['e_Hmag']
# K magnitude
Kmag = data['Kmag']
Kmag_err = data['e_Kmag']
# Stack
mag = np.vstack((
gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1)) # 8, nobj
mag_err = np.vstack((
gmag_err, rmag_err, imag_err, Jmag_err,
Hmag_err, Kmag_err, W2_err, W1_err))
# Make g-r, r-i, i-J, etc
col = mag[:-1] - mag[1:]
col_ivar = 1/(mag_err[:-1]**2 + mag_err[1:]**2)
# There's something wrong with the i-band, I think..so the second color r-i
#bad = col[:,1] < 0.0
#col_ivar[bad] = 0.0
return all_ids, col, col_ivar |
def save_json(data, json_file, indent=4, **kwargs):
"""
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with so many numbers of spaces
:param kwargs: Additional arguments for the json.dump command
"""
with open(json_file, "w") as f:
json.dump(data, f, indent=indent, **kwargs) | Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with so many numbers of spaces
:param kwargs: Additional arguments for the json.dump command | Below is the instruction that describes the task:
### Input:
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with so many numbers of spaces
:param kwargs: Additional arguments for the json.dump command
### Response:
def save_json(data, json_file, indent=4, **kwargs):
"""
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with so many numbers of spaces
:param kwargs: Additional arguments for the json.dump command
"""
with open(json_file, "w") as f:
json.dump(data, f, indent=indent, **kwargs) |
def query(self, action=None):
"""
returns cached query string (without &format=json) for given action,
or list of cached actions
"""
if action in self.cache:
return self.cache[action]['query'].replace('&format=json', '')
return self.cache.keys() or None | returns cached query string (without &format=json) for given action,
or list of cached actions | Below is the instruction that describes the task:
### Input:
returns cached query string (without &format=json) for given action,
or list of cached actions
### Response:
def query(self, action=None):
"""
returns cached query string (without &format=json) for given action,
or list of cached actions
"""
if action in self.cache:
return self.cache[action]['query'].replace('&format=json', '')
return self.cache.keys() or None |
def before_request(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
"""Add a before request function.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.before_request
def func():
...
Arguments:
func: The before request function itself.
name: Optional blueprint key name.
"""
handler = ensure_coroutine(func)
self.before_request_funcs[name].append(handler)
return func | Add a before request function.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.before_request
def func():
...
Arguments:
func: The before request function itself.
name: Optional blueprint key name. | Below is the instruction that describes the task:
### Input:
Add a before request function.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.before_request
def func():
...
Arguments:
func: The before request function itself.
name: Optional blueprint key name.
### Response:
def before_request(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
"""Add a before request function.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.before_request
def func():
...
Arguments:
func: The before request function itself.
name: Optional blueprint key name.
"""
handler = ensure_coroutine(func)
self.before_request_funcs[name].append(handler)
return func |
def save_cb(self, mode):
"""Save image, figure, and plot data arrays."""
# This just defines the basename.
# Extension has to be explicitly defined or things can get messy.
w = Widgets.SaveDialog(title='Save {0} data'.format(mode))
filename = w.get_path()
if filename is None:
# user canceled dialog
return
# TODO: This can be a user preference?
fig_dpi = 100
if mode == 'cuts':
fig, xarr, yarr = self.cuts_plot.get_data()
elif mode == 'slit':
fig, xarr, yarr = self.slit_plot.get_data()
figname = filename + '.png'
self.logger.info("saving figure as: %s" % (figname))
fig.savefig(figname, dpi=fig_dpi)
dataname = filename + '.npz'
self.logger.info("saving data as: %s" % (dataname))
np.savez_compressed(dataname, x=xarr, y=yarr) | Save image, figure, and plot data arrays. | Below is the instruction that describes the task:
### Input:
Save image, figure, and plot data arrays.
### Response:
def save_cb(self, mode):
"""Save image, figure, and plot data arrays."""
# This just defines the basename.
# Extension has to be explicitly defined or things can get messy.
w = Widgets.SaveDialog(title='Save {0} data'.format(mode))
filename = w.get_path()
if filename is None:
# user canceled dialog
return
# TODO: This can be a user preference?
fig_dpi = 100
if mode == 'cuts':
fig, xarr, yarr = self.cuts_plot.get_data()
elif mode == 'slit':
fig, xarr, yarr = self.slit_plot.get_data()
figname = filename + '.png'
self.logger.info("saving figure as: %s" % (figname))
fig.savefig(figname, dpi=fig_dpi)
dataname = filename + '.npz'
self.logger.info("saving data as: %s" % (dataname))
np.savez_compressed(dataname, x=xarr, y=yarr) |
def anonymous_login(self):
"""Login as anonymous user
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
"""
self._LOG.debug("Attempting Anonymous login")
self._pre_login()
self.username = None
self.login_key = None
message = MsgProto(EMsg.ClientLogon)
message.header.steamid = SteamID(type='AnonUser', universe='Public')
message.body.protocol_version = 65579
self.send(message)
resp = self.wait_msg(EMsg.ClientLogOnResponse, timeout=30)
return EResult(resp.body.eresult) if resp else EResult.Fail | Login as anonymous user
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult` | Below is the instruction that describes the task:
### Input:
Login as anonymous user
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
### Response:
def anonymous_login(self):
"""Login as anonymous user
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
"""
self._LOG.debug("Attempting Anonymous login")
self._pre_login()
self.username = None
self.login_key = None
message = MsgProto(EMsg.ClientLogon)
message.header.steamid = SteamID(type='AnonUser', universe='Public')
message.body.protocol_version = 65579
self.send(message)
resp = self.wait_msg(EMsg.ClientLogOnResponse, timeout=30)
return EResult(resp.body.eresult) if resp else EResult.Fail |
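A hedged usage sketch against the public steam package this method appears to come from (client class and import path assumed):
from steam.client import SteamClient  # assumed import path

client = SteamClient()
result = client.anonymous_login()  # EResult.OK on success, EResult.Fail on timeout
print(result)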
def colour_for_wp(self, wp_num):
'''return a tuple describing the colour a waypoint should appear on the map'''
wp = self.module('wp').wploader.wp(wp_num)
command = wp.command
return self._colour_for_wp_command.get(command, (0,255,0)) | return a tuple describing the colour a waypoint should appear on the map | Below is the instruction that describes the task:
### Input:
return a tuple describing the colour a waypoint should appear on the map
### Response:
def colour_for_wp(self, wp_num):
'''return a tuple describing the colour a waypoint should appear on the map'''
wp = self.module('wp').wploader.wp(wp_num)
command = wp.command
return self._colour_for_wp_command.get(command, (0,255,0)) |
def createEncoder():
"""
Creates and returns a #MultiEncoder including a ScalarEncoder for
energy consumption and a DateEncoder for the time of the day.
@see nupic/encoders/__init__.py for type to file-name mapping
@see nupic/encoders for encoder source files
"""
encoder = MultiEncoder()
encoder.addMultipleEncoders({
"consumption": {"fieldname": u"consumption",
"type": "ScalarEncoder",
"name": u"consumption",
"minval": 0.0,
"maxval": 100.0,
"clipInput": True,
"w": 21,
"n": 500},
"timestamp_timeOfDay": {"fieldname": u"timestamp",
"type": "DateEncoder",
"name": u"timestamp_timeOfDay",
"timeOfDay": (21, 9.5)}
})
return encoder | Creates and returns a #MultiEncoder including a ScalarEncoder for
energy consumption and a DateEncoder for the time of the day.
@see nupic/encoders/__init__.py for type to file-name mapping
@see nupic/encoders for encoder source files | Below is the instruction that describes the task:
### Input:
Creates and returns a #MultiEncoder including a ScalarEncoder for
energy consumption and a DateEncoder for the time of the day.
@see nupic/encoders/__init__.py for type to file-name mapping
@see nupic/encoders for encoder source files
### Response:
def createEncoder():
"""
Creates and returns a #MultiEncoder including a ScalarEncoder for
energy consumption and a DateEncoder for the time of the day.
@see nupic/encoders/__init__.py for type to file-name mapping
@see nupic/encoders for encoder source files
"""
encoder = MultiEncoder()
encoder.addMultipleEncoders({
"consumption": {"fieldname": u"consumption",
"type": "ScalarEncoder",
"name": u"consumption",
"minval": 0.0,
"maxval": 100.0,
"clipInput": True,
"w": 21,
"n": 500},
"timestamp_timeOfDay": {"fieldname": u"timestamp",
"type": "DateEncoder",
"name": u"timestamp_timeOfDay",
"timeOfDay": (21, 9.5)}
})
return encoder |
def unwrap(self, encrypted_data):
"""
Decrypts the data sent by the server using the TLS channel negotiated
between the client and the server.
:param encrypted_data: the byte string of the encrypted data
:return: a byte string of the decrypted data
"""
length = self.tls_connection.bio_write(encrypted_data)
data = b''
counter = 0
while True:
try:
data_chunk = self.tls_connection.recv(self.BIO_BUFFER_SIZE)
except SSL.WantReadError:
break
data += data_chunk
counter += self.BIO_BUFFER_SIZE
if counter > length:
break
return data | Decrypts the data sent by the server using the TLS channel negotiated
between the client and the server.
:param encrypted_data: the byte string of the encrypted data
:return: a byte string of the decrypted data | Below is the instruction that describes the task:
### Input:
Decrypts the data sent by the server using the TLS channel negotiated
between the client and the server.
:param encrypted_data: the byte string of the encrypted data
:return: a byte string of the decrypted data
### Response:
def unwrap(self, encrypted_data):
"""
Decrypts the data sent by the server using the TLS channel negotiated
between the client and the server.
:param encrypted_data: the byte string of the encrypted data
:return: a byte string of the decrypted data
"""
length = self.tls_connection.bio_write(encrypted_data)
data = b''
counter = 0
while True:
try:
data_chunk = self.tls_connection.recv(self.BIO_BUFFER_SIZE)
except SSL.WantReadError:
break
data += data_chunk
counter += self.BIO_BUFFER_SIZE
if counter > length:
break
return data |
def get_vlan_assignment_uuid(self):
"""Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID
"""
vlan_uuid_cmd = self.cli_commands['resource-pool']
if vlan_uuid_cmd:
return self._run_eos_cmds(commands=vlan_uuid_cmd)[0]
return None | Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID | Below is the instruction that describes the task:
### Input:
Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID
### Response:
def get_vlan_assignment_uuid(self):
"""Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID
"""
vlan_uuid_cmd = self.cli_commands['resource-pool']
if vlan_uuid_cmd:
return self._run_eos_cmds(commands=vlan_uuid_cmd)[0]
return None |
def get_fault_type_dummy_variables(self, rup):
"""
Fault-type classification dummy variable based on rup.rake.
"``H`` is 1 for a strike-slip mechanism and 0 for a reverse mechanism"
(p. 1201).
Note:
UserWarning is raised if mechanism is determined to be normal
faulting, since as summarized in Table 2 on p. 1197 the data used
for regression included only reverse and strike-slip events.
"""
# normal faulting
is_normal = np.array(
self.RAKE_THRESH < -rup.rake < (180. - self.RAKE_THRESH))
# reverse faulting
is_reverse = np.array(
self.RAKE_THRESH < rup.rake < (180. - self.RAKE_THRESH))
if not self.ALREADY_WARNED and is_normal.any():
# make sure that the warning is printed only once to avoid
# flooding the terminal
msg = ('Normal faulting not supported by %s; '
'treating as strike-slip' % type(self).__name__)
warnings.warn(msg, UserWarning)
self.ALREADY_WARNED = True
is_strike_slip = ~is_reverse | is_normal
is_strike_slip = is_strike_slip.astype(float)
return is_strike_slip | Fault-type classification dummy variable based on rup.rake.
"``H`` is 1 for a strike-slip mechanism and 0 for a reverse mechanism"
(p. 1201).
Note:
UserWarning is raised if mechanism is determined to be normal
faulting, since as summarized in Table 2 on p. 1197 the data used
for regression included only reverse and strike-slip events. | Below is the instruction that describes the task:
### Input:
Fault-type classification dummy variable based on rup.rake.
"``H`` is 1 for a strike-slip mechanism and 0 for a reverse mechanism"
(p. 1201).
Note:
UserWarning is raised if mechanism is determined to be normal
faulting, since as summarized in Table 2 on p. 1197 the data used
for regression included only reverse and strike-slip events.
### Response:
def get_fault_type_dummy_variables(self, rup):
"""
Fault-type classification dummy variable based on rup.rake.
"``H`` is 1 for a strike-slip mechanism and 0 for a reverse mechanism"
(p. 1201).
Note:
UserWarning is raised if mechanism is determined to be normal
faulting, since as summarized in Table 2 on p. 1197 the data used
for regression included only reverse and strike-slip events.
"""
# normal faulting
is_normal = np.array(
self.RAKE_THRESH < -rup.rake < (180. - self.RAKE_THRESH))
# reverse faulting
is_reverse = np.array(
self.RAKE_THRESH < rup.rake < (180. - self.RAKE_THRESH))
if not self.ALREADY_WARNED and is_normal.any():
# make sure that the warning is printed only once to avoid
# flooding the terminal
msg = ('Normal faulting not supported by %s; '
'treating as strike-slip' % type(self).__name__)
warnings.warn(msg, UserWarning)
self.ALREADY_WARNED = True
is_strike_slip = ~is_reverse | is_normal
is_strike_slip = is_strike_slip.astype(float)
return is_strike_slip |
def error(self, message, payload=None):
"""DEPRECATED"""
self.set_tag('error', True)
if payload:
self.log(event=message, payload=payload)
else:
self.log(event=message)
return self | DEPRECATED | Below is the instruction that describes the task:
### Input:
DEPRECATED
### Response:
def error(self, message, payload=None):
"""DEPRECATED"""
self.set_tag('error', True)
if payload:
self.log(event=message, payload=payload)
else:
self.log(event=message)
return self |
def check_page_for_warnings(html: str) -> None:
"""
Checks whether there are any warnings on the page and raises an exception if so
"""
soup = BeautifulSoup(html, 'html.parser')
warnings = soup.find_all('div', {'class': 'service_msg_warning'})
if warnings:
exception_msg = '; '.join((warning.get_text() for warning in warnings))
raise VVKPageWarningException(exception_msg) | Checks whether there are any warnings on the page and raises an exception if so | Below is the instruction that describes the task:
### Input:
Checks whether there are any warnings on the page and raises an exception if so
### Response:
def check_page_for_warnings(html: str) -> None:
"""
Checks whether there are any warnings on the page and raises an exception if so
"""
soup = BeautifulSoup(html, 'html.parser')
warnings = soup.find_all('div', {'class': 'service_msg_warning'})
if warnings:
exception_msg = '; '.join((warning.get_text() for warning in warnings))
raise VVKPageWarningException(exception_msg) |
def call_from_thread(self, fn):
"""Allow thread-safe calls to ioloop functions.
Uses add_callback if not in the IOLoop thread, otherwise calls
directly. Returns an already resolved `tornado.concurrent.Future` if in
ioloop, otherwise a `concurrent.Future`. Logs unhandled exceptions.
Resolves with an exception if one occurred.
"""
if self.in_ioloop_thread():
f = tornado_Future()
try:
f.set_result(fn())
except Exception as e:
f.set_exception(e)
self._logger.exception('Error executing callback '
'in ioloop thread')
finally:
return f
else:
f = Future()
try:
f.set_running_or_notify_cancel()
def send_message_callback():
try:
f.set_result(fn())
except Exception as e:
f.set_exception(e)
self._logger.exception(
'Error executing wrapped async callback')
self.ioloop.add_callback(send_message_callback)
finally:
return f | Allow thread-safe calls to ioloop functions.
Uses add_callback if not in the IOLoop thread, otherwise calls
directly. Returns an already resolved `tornado.concurrent.Future` if in
ioloop, otherwise a `concurrent.Future`. Logs unhandled exceptions.
Resolves with an exception if one occurred. | Below is the instruction that describes the task:
### Input:
Allow thread-safe calls to ioloop functions.
Uses add_callback if not in the IOLoop thread, otherwise calls
directly. Returns an already resolved `tornado.concurrent.Future` if in
ioloop, otherwise a `concurrent.Future`. Logs unhandled exceptions.
Resolves with an exception if one occurred.
### Response:
def call_from_thread(self, fn):
"""Allow thread-safe calls to ioloop functions.
Uses add_callback if not in the IOLoop thread, otherwise calls
directly. Returns an already resolved `tornado.concurrent.Future` if in
ioloop, otherwise a `concurrent.Future`. Logs unhandled exceptions.
Resolves with an exception if one occurred.
"""
if self.in_ioloop_thread():
f = tornado_Future()
try:
f.set_result(fn())
except Exception as e:
f.set_exception(e)
self._logger.exception('Error executing callback '
'in ioloop thread')
finally:
return f
else:
f = Future()
try:
f.set_running_or_notify_cancel()
def send_message_callback():
try:
f.set_result(fn())
except Exception as e:
f.set_exception(e)
self._logger.exception(
'Error executing wrapped async callback')
self.ioloop.add_callback(send_message_callback)
finally:
return f |
def crashlog_clean(name, timestamp, size, **kwargs):
"""
For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP.
"""
ctx = Context(**kwargs)
ctx.execute_action('crashlog:clean', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
'size': size,
'timestamp': timestamp,
}) | For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP. | Below is the instruction that describes the task:
### Input:
For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP.
### Response:
def crashlog_clean(name, timestamp, size, **kwargs):
"""
For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP.
"""
ctx = Context(**kwargs)
ctx.execute_action('crashlog:clean', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
'size': size,
'timestamp': timestamp,
}) |
def prep_search_string(self, search_string, add_wildcards=False):
"""Prepares search string as a proper whoosh search string.
Args:
search_string (str): it prepares the search string and see if
the length is correct.
Optional Args:
add_wildcards (bool): It runs a query for inexact queries.
Raises:
ValueError: When the search string does not have the appropriate
length. This length may be changed in the config options.
"""
s = search_string.strip()
try:
s = str(s)
except:
pass
s = s.replace('*', '')
if len(s) < self._pw.search_string_min_len:
raise ValueError('Search string must have at least {} characters'
.format(self._pw.search_string_min_len))
if add_wildcards:
s = '*{0}*'.format(re.sub('[\s]+', '* *', s))
return s | Prepares search string as a proper whoosh search string.
Args:
search_string (str): it prepares the search string and see if
the length is correct.
Optional Args:
add_wildcards (bool): It runs a query for inexact queries.
Raises:
ValueError: When the search string does not have the appropriate
length. This length may be changed in the config options. | Below is the instruction that describes the task:
### Input:
Prepares search string as a proper whoosh search string.
Args:
search_string (str): it prepares the search string and see if
the length is correct.
Optional Args:
add_wildcards (bool): It runs a query for inexact queries.
Raises:
ValueError: When the search string does not have the appropriate
length. This length may be changed in the config options.
### Response:
def prep_search_string(self, search_string, add_wildcards=False):
"""Prepares search string as a proper whoosh search string.
Args:
search_string (str): it prepares the search string and see if
the length is correct.
Optional Args:
add_wildcards (bool): It runs a query for inexact queries.
Raises:
ValueError: When the search string does not have the appropriate
length. This length may be changed in the config options.
"""
s = search_string.strip()
try:
s = str(s)
except:
pass
s = s.replace('*', '')
if len(s) < self._pw.search_string_min_len:
raise ValueError('Search string must have at least {} characters'
.format(self._pw.search_string_min_len))
if add_wildcards:
s = '*{0}*'.format(re.sub('[\s]+', '* *', s))
return s |
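A worked example of the wildcard transformation above, assuming search_string_min_len is small enough for the input to pass:
import re

s = "foo bar".strip().replace('*', '')
print('*{0}*'.format(re.sub(r'\s+', '* *', s)))  # -> *foo* *bar*
# So prep_search_string("foo bar", add_wildcards=True) would return "*foo* *bar*",
# while prep_search_string(" *foo* bar ") would return "foo bar" with the stray '*' stripped.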
def _check_feature_dependences(self):
"""Verify feature dependences."""
for mid in self.modules:
for fst in self.modules[mid].statement.find_all("feature"):
fn, fid = self.resolve_pname(fst.argument, mid)
if fn not in self.modules[fid].features:
continue
if not self.if_features(fst, mid):
raise FeaturePrerequisiteError(*fn) | Verify feature dependences. | Below is the instruction that describes the task:
### Input:
Verify feature dependences.
### Response:
def _check_feature_dependences(self):
"""Verify feature dependences."""
for mid in self.modules:
for fst in self.modules[mid].statement.find_all("feature"):
fn, fid = self.resolve_pname(fst.argument, mid)
if fn not in self.modules[fid].features:
continue
if not self.if_features(fst, mid):
raise FeaturePrerequisiteError(*fn) |
def visit_yield(self, node, parent):
"""visit a Yield node by returning a fresh instance of it"""
newnode = nodes.Yield(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode | visit a Yield node by returning a fresh instance of it | Below is the instruction that describes the task:
### Input:
visit a Yield node by returning a fresh instance of it
### Response:
def visit_yield(self, node, parent):
"""visit a Yield node by returning a fresh instance of it"""
newnode = nodes.Yield(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode |
def get_stoch(self, symbol, interval='daily', fastkperiod=None,
slowkperiod=None, slowdperiod=None, slowkmatype=None, slowdmatype=None):
""" Return the stochatic oscillator values in two
json objects as data and meta_data. It raises ValueError when problems
arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
interval: time interval between two consecutive values,
supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
'weekly', 'monthly' (default 'daily')
fastkperiod: The time period of the fastk moving average. Positive
integers are accepted (default=None)
slowkperiod: The time period of the slowk moving average. Positive
integers are accepted (default=None)
slowdperiod: The time period of the slowd moving average. Positive
integers are accepted (default=None)
slowkmatype: Moving average type for the slowk moving average.
By default, fastmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
slowdmatype: Moving average type for the slowd moving average.
By default, slowmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
"""
_FUNCTION_KEY = "STOCH"
return _FUNCTION_KEY, 'Technical Analysis: STOCH', 'Meta Data' | Return the stochastic oscillator values in two
json objects as data and meta_data. It raises ValueError when problems
arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
interval: time interval between two consecutive values,
supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
'weekly', 'monthly' (default 'daily')
fastkperiod: The time period of the fastk moving average. Positive
integers are accepted (default=None)
slowkperiod: The time period of the slowk moving average. Positive
integers are accepted (default=None)
slowdperiod: The time period of the slowd moving average. Positive
integers are accepted (default=None)
slowkmatype: Moving average type for the slowk moving average.
By default, fastmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
slowdmatype: Moving average type for the slowd moving average.
By default, slowmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA) | Below is the instruction that describes the task:
### Input:
Return the stochastic oscillator values in two
json objects as data and meta_data. It raises ValueError when problems
arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
interval: time interval between two consecutive values,
supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
'weekly', 'monthly' (default 'daily')
fastkperiod: The time period of the fastk moving average. Positive
integers are accepted (default=None)
slowkperiod: The time period of the slowk moving average. Positive
integers are accepted (default=None)
slowdperiod: The time period of the slowd moving average. Positive
integers are accepted (default=None)
slowkmatype: Moving average type for the slowk moving average.
By default, fastmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
slowdmatype: Moving average type for the slowd moving average.
By default, slowmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
### Response:
def get_stoch(self, symbol, interval='daily', fastkperiod=None,
slowkperiod=None, slowdperiod=None, slowkmatype=None, slowdmatype=None):
""" Return the stochatic oscillator values in two
json objects as data and meta_data. It raises ValueError when problems
arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
interval: time interval between two conscutive values,
supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
'weekly', 'monthly' (default 'daily')
fastkperiod: The time period of the fastk moving average. Positive
integers are accepted (default=None)
slowkperiod: The time period of the slowk moving average. Positive
integers are accepted (default=None)
slowdperiod: The time period of the slowd moving average. Positive
integers are accepted (default=None)
slowkmatype: Moving average type for the slowk moving average.
By default, fastmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
slowdmatype: Moving average type for the slowd moving average.
By default, slowmatype=0. Integers 0 - 8 are accepted
(check down the mappings) or the string containing the math type can
also be used.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
"""
_FUNCTION_KEY = "STOCH"
return _FUNCTION_KEY, 'Technical Analysis: STOCH', 'Meta Data' |
def load_core_file(core_fp):
"""
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
core = {}
with open(core_fp) as in_f:
for line in in_f.read().splitlines():
if not line.startswith("#"):
otu_id, tax = line.split("\t")
core[otu_id] = oc.otu_name(ast.literal_eval(tax))
return core | For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level. | Below is the instruction that describes the task:
### Input:
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
### Response:
def load_core_file(core_fp):
"""
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
core = {}
with open(core_fp) as in_f:
for line in in_f.read().splitlines():
if not line.startswith("#"):
otu_id, tax = line.split("\t")
core[otu_id] = oc.otu_name(ast.literal_eval(tax))
return core |
def fits_recarray_to_dict(table):
"""Convert a FITS recarray to a python dictionary."""
cols = {}
for icol, col in enumerate(table.columns.names):
col_data = table.data[col]
if type(col_data[0]) == np.float32:
cols[col] = np.array(col_data, dtype=float)
elif type(col_data[0]) == np.float64:
cols[col] = np.array(col_data, dtype=float)
elif type(col_data[0]) == str:
cols[col] = np.array(col_data, dtype=str)
elif type(col_data[0]) == np.string_:
cols[col] = np.array(col_data, dtype=str)
elif type(col_data[0]) == np.int16:
cols[col] = np.array(col_data, dtype=int)
elif type(col_data[0]) == np.ndarray:
cols[col] = np.array(col_data)
else:
raise Exception(
'Unrecognized column type: %s %s' % (col, str(type(col_data))))
return cols | Convert a FITS recarray to a python dictionary. | Below is the instruction that describes the task:
### Input:
Convert a FITS recarray to a python dictionary.
### Response:
def fits_recarray_to_dict(table):
"""Convert a FITS recarray to a python dictionary."""
cols = {}
for icol, col in enumerate(table.columns.names):
col_data = table.data[col]
if type(col_data[0]) == np.float32:
cols[col] = np.array(col_data, dtype=float)
elif type(col_data[0]) == np.float64:
cols[col] = np.array(col_data, dtype=float)
elif type(col_data[0]) == str:
cols[col] = np.array(col_data, dtype=str)
elif type(col_data[0]) == np.string_:
cols[col] = np.array(col_data, dtype=str)
elif type(col_data[0]) == np.int16:
cols[col] = np.array(col_data, dtype=int)
elif type(col_data[0]) == np.ndarray:
cols[col] = np.array(col_data)
else:
raise Exception(
'Unrecognized column type: %s %s' % (col, str(type(col_data))))
return cols |
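A hedged usage sketch; the function expects a FITS binary-table HDU, and the reader import is an assumption (astropy.io.fits shown as the usual stand-in for pyfits):
from astropy.io import fits  # assumed; the original code base may import pyfits instead

with fits.open("catalog.fits") as hdul:     # hypothetical file name
    cols = fits_recarray_to_dict(hdul[1])   # dict of numpy arrays keyed by column name
print(sorted(cols))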
def find_nodes(self, **kwargs):
"""Searches the data nodes that are associated with this graph using
the key word arguments as a filter and returns a
:class:`django.db.models.query.QuerySet`` of the attached
:class:`Node` objects.
:param kwargs:
filter arguments applied to searching the :class:`BaseNodeData`
subclass associated with this graph.
:returns:
``QuerySet`` of :class:`Node` objects
"""
filter_args = {}
classname = self.data_content_type.model_class().__name__.lower()
for key, value in kwargs.items():
filter_args['%s__%s' % (classname, key)] = value
return Node.objects.filter(**filter_args) | Searches the data nodes that are associated with this graph using
the key word arguments as a filter and returns a
:class:`django.db.models.query.QuerySet`` of the attached
:class:`Node` objects.
:param kwargs:
filter arguments applied to searching the :class:`BaseNodeData`
subclass associated with this graph.
:returns:
``QuerySet`` of :class:`Node` objects | Below is the instruction that describes the task:
### Input:
Searches the data nodes that are associated with this graph using
the key word arguments as a filter and returns a
:class:`django.db.models.query.QuerySet`` of the attached
:class:`Node` objects.
:param kwargs:
filter arguments applied to searching the :class:`BaseNodeData`
subclass associated with this graph.
:returns:
``QuerySet`` of :class:`Node` objects
### Response:
def find_nodes(self, **kwargs):
"""Searches the data nodes that are associated with this graph using
the key word arguments as a filter and returns a
:class:`django.db.models.query.QuerySet`` of the attached
:class:`Node` objects.
:param kwargs:
filter arguments applied to searching the :class:`BaseNodeData`
subclass associated with this graph.
:returns:
``QuerySet`` of :class:`Node` objects
"""
filter_args = {}
classname = self.data_content_type.model_class().__name__.lower()
for key, value in kwargs.items():
filter_args['%s__%s' % (classname, key)] = value
return Node.objects.filter(**filter_args) |
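A hypothetical call sketch for find_nodes; it is not runnable outside a Django project and assumes a concrete BaseNodeData subclass (here called NodeData) with a CharField named "name", plus an existing graph instance.
# NodeData, its "name" field and the `graph` instance are assumptions for illustration.
matching = graph.find_nodes(name__startswith="web")
print(matching.count())  # Node objects whose attached NodeData.name starts with "web"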
def _startStationsNode(self, name, attrs):
"""Process the start of a node under xtvd/stations"""
if name == 'station':
self._stationId = attrs.get('id')
self._callSign = None
self._stationName = None
self._affiliate = None
self._fccChannelNumber = None | Process the start of a node under xtvd/stations | Below is the the instruction that describes the task:
### Input:
Process the start of a node under xtvd/stations
### Response:
def _startStationsNode(self, name, attrs):
"""Process the start of a node under xtvd/stations"""
if name == 'station':
self._stationId = attrs.get('id')
self._callSign = None
self._stationName = None
self._affiliate = None
self._fccChannelNumber = None |
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
del self._registry[model] | Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation. | Below is the the instruction that describes the task:
### Input:
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
### Response:
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
del self._registry[model] |
def parse_multipart_form_data(boundary, data, arguments, files):
"""Parses a multipart/form-data body.
The boundary and data parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b('"')) and boundary.endswith(b('"')):
boundary = boundary[1:-1]
if data.endswith(b("\r\n")):
footer_length = len(boundary) + 6
else:
footer_length = len(boundary) + 4
parts = data[:-footer_length].split(b("--") + boundary + b("\r\n"))
for part in parts:
if not part: continue
eoh = part.find(b("\r\n\r\n"))
if eoh == -1:
logging.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b("\r\n")):
logging.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
logging.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(dict(
filename=disp_params["filename"], body=value,
content_type=ctype))
else:
arguments.setdefault(name, []).append(value) | Parses a multipart/form-data body.
The boundary and data parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body. | Below is the the instruction that describes the task:
### Input:
Parses a multipart/form-data body.
The boundary and data parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
### Response:
def parse_multipart_form_data(boundary, data, arguments, files):
"""Parses a multipart/form-data body.
The boundary and data parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b('"')) and boundary.endswith(b('"')):
boundary = boundary[1:-1]
if data.endswith(b("\r\n")):
footer_length = len(boundary) + 6
else:
footer_length = len(boundary) + 4
parts = data[:-footer_length].split(b("--") + boundary + b("\r\n"))
for part in parts:
if not part: continue
eoh = part.find(b("\r\n\r\n"))
if eoh == -1:
logging.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b("\r\n")):
logging.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
logging.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(dict(
filename=disp_params["filename"], body=value,
content_type=ctype))
else:
arguments.setdefault(name, []).append(value) |
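A small sketch of feeding parse_multipart_form_data a hand-built body; it assumes the Tornado-era helpers it relies on (b(), HTTPHeaders, _parse_header) are importable from the same module, and the field names are invented.
boundary = b"1234567890"
body = (b"--1234567890\r\n"
        b'Content-Disposition: form-data; name="title"\r\n'
        b"\r\n"
        b"hello\r\n"
        b"--1234567890\r\n"
        b'Content-Disposition: form-data; name="upload"; filename="a.txt"\r\n'
        b"Content-Type: text/plain\r\n"
        b"\r\n"
        b"file contents\r\n"
        b"--1234567890--\r\n")
arguments, files = {}, {}
parse_multipart_form_data(boundary, body, arguments, files)
print(arguments["title"])              # a one-item list containing b'hello'
print(files["upload"][0]["filename"])  # a.txt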
def tagversion(repo, level='patch', special=''):
"""Increment and return tagged version in git.
Increment levels are patch, minor and major.
Using semver.org versioning: {major}.{minor}.{patch}{special}
Special must start with a-z and consist of _a-zA-Z0-9.
"""
prepend = 'v'
with lcd(repo):
oldversion = local(
'git describe --abbrev=0 --tags', capture=True).strip()
if oldversion.startswith('v'):
oldversion = oldversion[1:]
else:
prepend = ''
major, minor, patch = [int(x) for x in re.split('\D', oldversion, 3)[:3]]
if special:
if not re.match('^[a-z]', special):
raise ValueError('Special must start with a-z')
if not re.match('[_a-zA-Z0-9]+', special):
raise ValueError('Special may only contain characters in _a-zA-Z0-9')
if level == 'major':
major, minor, patch = major + 1, 0, 0
elif level == 'minor':
major, minor, patch = major, minor + 1, 0
elif level == 'patch':
major, minor, patch = major, minor, patch + 1
version_string = '{}.{}.{}'.format(major, minor, patch) + special
with lcd(repo):
local('git tag -s --force {}{}'.format(prepend, version_string))
return version_string | Increment and return tagged version in git.
Increment levels are patch, minor and major.
Using semver.org versioning: {major}.{minor}.{patch}{special}
Special must start with a-z and consist of _a-zA-Z0-9. | Below is the the instruction that describes the task:
### Input:
Increment and return tagged version in git.
Increment levels are patch, minor and major.
Using semver.org versioning: {major}.{minor}.{patch}{special}
Special must start with a-z and consist of _a-zA-Z0-9.
### Response:
def tagversion(repo, level='patch', special=''):
"""Increment and return tagged version in git.
Increment levels are patch, minor and major.
Using semver.org versioning: {major}.{minor}.{patch}{special}
Special must start with a-z and consist of _a-zA-Z0-9.
"""
prepend = 'v'
with lcd(repo):
oldversion = local(
'git describe --abbrev=0 --tags', capture=True).strip()
if oldversion.startswith('v'):
oldversion = oldversion[1:]
else:
prepend = ''
major, minor, patch = [int(x) for x in re.split('\D', oldversion, 3)[:3]]
if special:
if not re.match('^[a-z]', special):
raise ValueError('Special must start with a-z')
if not re.match('[_a-zA-Z0-9]+', special):
raise ValueError('Special may only contain characters in _a-zA-Z0-9')
if level == 'major':
major, minor, patch = major + 1, 0, 0
elif level == 'minor':
major, minor, patch = major, minor + 1, 0
elif level == 'patch':
major, minor, patch = major, minor, patch + 1
version_string = '{}.{}.{}'.format(major, minor, patch) + special
with lcd(repo):
local('git tag -s --force {}{}'.format(prepend, version_string))
return version_string |
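Since tagversion shells out to git through Fabric, the version arithmetic is easiest to see in isolation; bump() below is a made-up pure-Python sketch of that logic, not part of the original module.
import re

def bump(oldversion, level='patch', special=''):
    # Mirrors the increment rules used by tagversion, without touching git.
    major, minor, patch = [int(x) for x in re.split(r'\D', oldversion.lstrip('v'), 3)[:3]]
    if level == 'major':
        major, minor, patch = major + 1, 0, 0
    elif level == 'minor':
        major, minor, patch = major, minor + 1, 0
    else:  # 'patch'
        major, minor, patch = major, minor, patch + 1
    return '{}.{}.{}'.format(major, minor, patch) + special

print(bump('v1.2.3'))                               # 1.2.4
print(bump('1.2.3', level='minor'))                 # 1.3.0
print(bump('2.0.9', level='major', special='rc1'))  # 3.0.0rc1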
def join(self, target):
"""join a channel"""
password = self.config.passwords.get(
target.strip(self.server_config['CHANTYPES']))
if password:
target += ' ' + password
self.send_line('JOIN %s' % target) | join a channel | Below is the the instruction that describes the task:
### Input:
join a channel
### Response:
def join(self, target):
"""join a channel"""
password = self.config.passwords.get(
target.strip(self.server_config['CHANTYPES']))
if password:
target += ' ' + password
self.send_line('JOIN %s' % target) |
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
) | `patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases. | Below is the the instruction that describes the task:
### Input:
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
### Response:
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
) |
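A minimal usage sketch of the decorator and context-manager forms described above, written against the standard-library unittest.mock port, which exposes the same patch API.
from unittest import mock
import os

@mock.patch('os.getcwd', return_value='/fake/dir')
def show_cwd(mocked_getcwd):
    # The created MagicMock is passed in as an extra argument.
    print(os.getcwd())           # /fake/dir
    print(mocked_getcwd.called)  # True

show_cwd()

with mock.patch('os.path.exists', return_value=True):
    print(os.path.exists('/definitely/not/there'))  # True while the patch is active
print(os.path.exists('/definitely/not/there'))      # False again once the patch is undone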
def on_bind_target(self, target, ctx=None):
"""Fired after target is bound to self.
:param target: newly bound target.
:param ctx: target ctx.
"""
_on_bind_target = getattr(self, Annotation._ON_BIND_TARGET, None)
if _on_bind_target is not None:
_on_bind_target(self, target=target, ctx=ctx) | Fired after target is bound to self.
:param target: newly bound target.
:param ctx: target ctx. | Below is the the instruction that describes the task:
### Input:
Fired after target is bound to self.
:param target: newly bound target.
:param ctx: target ctx.
### Response:
def on_bind_target(self, target, ctx=None):
"""Fired after target is bound to self.
:param target: newly bound target.
:param ctx: target ctx.
"""
_on_bind_target = getattr(self, Annotation._ON_BIND_TARGET, None)
if _on_bind_target is not None:
_on_bind_target(self, target=target, ctx=ctx) |
def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts',
preprocess=None, lock=None, **kwargs):
""" Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
"""
from xarray.backends.api import _MultiFileCloser
# TODO: Include file locks?
# Check for dask
dask = kwargs.pop('dask', False)
if not dask:
raise ValueError("Reading multiple files without dask is not supported")
kwargs['dask'] = True
# Add th
if isinstance(paths, basestring):
paths = sorted(glob(paths))
if not paths:
raise IOError("No paths to files were passed into open_mfbpchdataset")
datasets = [open_bpchdataset(filename, **kwargs)
for filename in paths]
bpch_objs = [ds._file_obj for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
# Concatenate over time
combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim)
combined._file_obj = _MultiFileCloser(bpch_objs)
combined.attrs = datasets[0].attrs
ts = get_timestamp()
fns_str = " ".join(paths)
combined.attrs['history'] = (
"{}: Processed/loaded by xbpch-{} from {}"
.format(ts, ver, fns_str)
)
return combined | Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`. | Below is the the instruction that describes the task:
### Input:
Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
### Response:
def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts',
preprocess=None, lock=None, **kwargs):
""" Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
"""
from xarray.backends.api import _MultiFileCloser
# TODO: Include file locks?
# Check for dask
dask = kwargs.pop('dask', False)
if not dask:
raise ValueError("Reading multiple files without dask is not supported")
kwargs['dask'] = True
# Add th
if isinstance(paths, basestring):
paths = sorted(glob(paths))
if not paths:
raise IOError("No paths to files were passed into open_mfbpchdataset")
datasets = [open_bpchdataset(filename, **kwargs)
for filename in paths]
bpch_objs = [ds._file_obj for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
# Concatenate over time
combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim)
combined._file_obj = _MultiFileCloser(bpch_objs)
combined.attrs = datasets[0].attrs
ts = get_timestamp()
fns_str = " ".join(paths)
combined.attrs['history'] = (
"{}: Processed/loaded by xbpch-{} from {}"
.format(ts, ver, fns_str)
)
return combined |
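An illustrative call only: the bpch file names and the tracerinfo/diaginfo paths are placeholders that must exist on disk, the keyword arguments are assumed to be ones open_bpchdataset accepts, and dask is required as noted above.
ds = open_mfbpchdataset(
    ["ND49_20060101.bpch", "ND49_20060102.bpch"],  # hypothetical daily output files
    tracerinfo_file="tracerinfo.dat",              # forwarded to open_bpchdataset
    diaginfo_file="diaginfo.dat",
)
print(ds.dims)                    # includes the concatenated "time" dimension
ds.to_netcdf("nd49_combined.nc")  # coerce to NetCDF for faster access later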
def patch_sys_version():
""" Remove Continuum copyright statement to avoid parsing errors in IDLE """
if '|' in sys.version:
sys_version = sys.version.split('|')
sys.version = ' '.join([sys_version[0].strip(), sys_version[-1].strip()]) | Remove Continuum copyright statement to avoid parsing errors in IDLE | Below is the the instruction that describes the task:
### Input:
Remove Continuum copyright statement to avoid parsing errors in IDLE
### Response:
def patch_sys_version():
""" Remove Continuum copyright statement to avoid parsing errors in IDLE """
if '|' in sys.version:
sys_version = sys.version.split('|')
sys.version = ' '.join([sys_version[0].strip(), sys_version[-1].strip()]) |
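The effect on an Anaconda-style banner, shown on a synthetic string so the real sys.version is left untouched.
banner = "3.6.5 |Anaconda, Inc.| (default, Apr 29 2018, 16:14:56)"
parts = banner.split('|')
print(' '.join([parts[0].strip(), parts[-1].strip()]))
# 3.6.5 (default, Apr 29 2018, 16:14:56)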
def checkCAS(CASRN):
'''Checks if a CAS number is valid. Returns False if the parser cannot
parse the given string.
Parameters
----------
CASRN : string
A three-piece, dash-separated set of numbers
Returns
-------
result : bool
Boolean value if CASRN was valid. If parsing fails, return False also.
Notes
-----
Check method is according to the Chemical Abstracts Service. However, no lookup
to their service is performed; therefore, this function cannot detect
false positives.
Function also does not support additional separators, apart from '-'.
CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.
A long can hold CAS numbers up to 2 147 483-64-7
Examples
--------
>>> checkCAS('7732-18-5')
True
>>> checkCAS('77332-18-5')
False
'''
try:
check = CASRN[-1]
CASRN = CASRN[::-1][1:]
productsum = 0
i = 1
for num in CASRN:
if num == '-':
pass
else:
productsum += i*int(num)
i += 1
return (productsum % 10 == int(check))
except:
return False | Checks if a CAS number is valid. Returns False if the parser cannot
parse the given string.
Parameters
----------
CASRN : string
A three-piece, dash-separated set of numbers
Returns
-------
result : bool
Boolean value if CASRN was valid. If parsing fails, return False also.
Notes
-----
Check method is according to the Chemical Abstracts Service. However, no lookup
to their service is performed; therefore, this function cannot detect
false positives.
Function also does not support additional separators, apart from '-'.
CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.
A long can hold CAS numbers up to 2 147 483-64-7
Examples
--------
>>> checkCAS('7732-18-5')
True
>>> checkCAS('77332-18-5')
False | Below is the the instruction that describes the task:
### Input:
Checks if a CAS number is valid. Returns False if the parser cannot
parse the given string.
Parameters
----------
CASRN : string
A three-piece, dash-separated set of numbers
Returns
-------
result : bool
Boolean value if CASRN was valid. If parsing fails, return False also.
Notes
-----
Check method is according to the Chemical Abstracts Service. However, no lookup
to their service is performed; therefore, this function cannot detect
false positives.
Function also does not support additional separators, apart from '-'.
CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.
A long can hold CAS numbers up to 2 147 483-64-7
Examples
--------
>>> checkCAS('7732-18-5')
True
>>> checkCAS('77332-18-5')
False
### Response:
def checkCAS(CASRN):
'''Checks if a CAS number is valid. Returns False if the parser cannot
parse the given string.
Parameters
----------
CASRN : string
A three-piece, dash-separated set of numbers
Returns
-------
result : bool
Boolean value if CASRN was valid. If parsing fails, return False also.
Notes
-----
Check method is according to the Chemical Abstracts Service. However, no lookup
to their service is performed; therefore, this function cannot detect
false positives.
Function also does not support additional separators, apart from '-'.
CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.
A long can hold CAS numbers up to 2 147 483-64-7
Examples
--------
>>> checkCAS('7732-18-5')
True
>>> checkCAS('77332-18-5')
False
'''
try:
check = CASRN[-1]
CASRN = CASRN[::-1][1:]
productsum = 0
i = 1
for num in CASRN:
if num == '-':
pass
else:
productsum += i*int(num)
i += 1
return (productsum % 10 == int(check))
except:
return False |
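A worked check-digit calculation for water (7732-18-5), mirroring the weighted-sum loop above.
digits_reversed = "7732-18"[::-1].replace("-", "")  # '812377'
checksum = sum(i * int(d) for i, d in enumerate(digits_reversed, start=1))
print(checksum)            # 8*1 + 1*2 + 2*3 + 3*4 + 7*5 + 7*6 = 105
print(checksum % 10 == 5)  # True, so 5 is the correct check digit
print(checkCAS('7732-18-5'), checkCAS('77332-18-5'))  # True False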
def CopyToDict(self):
"""Copies the path specification to a dictionary.
Returns:
dict[str, object]: path specification attributes.
"""
path_spec_dict = {}
for attribute_name, attribute_value in iter(self.__dict__.items()):
if attribute_value is None:
continue
if attribute_name == 'parent':
attribute_value = attribute_value.CopyToDict()
path_spec_dict[attribute_name] = attribute_value
return path_spec_dict | Copies the path specification to a dictionary.
Returns:
dict[str, object]: path specification attributes. | Below is the the instruction that describes the task:
### Input:
Copies the path specification to a dictionary.
Returns:
dict[str, object]: path specification attributes.
### Response:
def CopyToDict(self):
"""Copies the path specification to a dictionary.
Returns:
dict[str, object]: path specification attributes.
"""
path_spec_dict = {}
for attribute_name, attribute_value in iter(self.__dict__.items()):
if attribute_value is None:
continue
if attribute_name == 'parent':
attribute_value = attribute_value.CopyToDict()
path_spec_dict[attribute_name] = attribute_value
return path_spec_dict |
def rewrite_file(self) -> None:
"""
Rewrites the source file.
"""
if not self.needs_rewriting:
return
self._info("Rewriting file")
with open(self.full_path, "w") as outfile:
self._write(outfile) | Rewrites the source file. | Below is the the instruction that describes the task:
### Input:
Rewrites the source file.
### Response:
def rewrite_file(self) -> None:
"""
Rewrites the source file.
"""
if not self.needs_rewriting:
return
self._info("Rewriting file")
with open(self.full_path, "w") as outfile:
self._write(outfile) |
def find_element_by_selectors(webdriver, *selectors):
"""
Utility method makes it easier to find an element using multiple selectors. This is
useful for problematic elements that might work with one browser, but fail in another.
(Like different page elements being served up for different browsers)
Args:
selectors - var arg if N number of selectors to match against. Each selector should
be a Selenium 'By' object.
Usage::
my_element = WebElementSelector.find_element_by_selectors(webdriver,
(By.ID, "MyElementID"),
(By.CSS, "MyClassSelector") )
"""
# perform initial check to verify selectors are valid by statements.
for selector in selectors:
(by_method, value) = selector
if not WebElementSelector.__is_valid_by_type(by_method):
raise BadSelectorError(
u("Selectors should be of type selenium.webdriver.common.by.By"))
if type(value) != str:
raise BadSelectorError(
u("Selectors should be of type selenium.webdriver.common.by.By"))
selectors_used = []
for selector in selectors:
(by_method, value) = selector
selectors_used.append(
u("{by}:{value}").format(by=by_method, value=value))
try:
return webdriver.find_element(by=by_method, value=value)
except:
pass
raise ElementNotSelectableException(
u("Unable to find elements using:") + u(",").join(selectors_used)) | Utility method makes it easier to find an element using multiple selectors. This is
useful for problematic elements that might work with one browser, but fail in another.
(Like different page elements being served up for different browsers)
Args:
selectors - var arg if N number of selectors to match against. Each selector should
be a Selenium 'By' object.
Usage::
my_element = WebElementSelector.find_element_by_selectors(webdriver,
(By.ID, "MyElementID"),
(By.CSS, "MyClassSelector") ) | Below is the the instruction that describes the task:
### Input:
Utility method makes it easier to find an element using multiple selectors. This is
useful for problematic elements that might work with one browser, but fail in another.
(Like different page elements being served up for different browsers)
Args:
selectors - var arg if N number of selectors to match against. Each selector should
be a Selenium 'By' object.
Usage::
my_element = WebElementSelector.find_element_by_selectors(webdriver,
(By.ID, "MyElementID"),
(By.CSS, "MyClassSelector") )
### Response:
def find_element_by_selectors(webdriver, *selectors):
"""
Utility method makes it easier to find an element using multiple selectors. This is
useful for problematic elements that might work with one browser, but fail in another.
(Like different page elements being served up for different browsers)
Args:
selectors - var arg if N number of selectors to match against. Each selector should
be a Selenium 'By' object.
Usage::
my_element = WebElementSelector.find_element_by_selectors(webdriver,
(By.ID, "MyElementID"),
(By.CSS, "MyClassSelector") )
"""
# perform initial check to verify selectors are valid by statements.
for selector in selectors:
(by_method, value) = selector
if not WebElementSelector.__is_valid_by_type(by_method):
raise BadSelectorError(
u("Selectors should be of type selenium.webdriver.common.by.By"))
if type(value) != str:
raise BadSelectorError(
u("Selectors should be of type selenium.webdriver.common.by.By"))
selectors_used = []
for selector in selectors:
(by_method, value) = selector
selectors_used.append(
u("{by}:{value}").format(by=by_method, value=value))
try:
return webdriver.find_element(by=by_method, value=value)
except:
pass
raise ElementNotSelectableException(
u("Unable to find elements using:") + u(",").join(selectors_used)) |
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieves the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: whether to pre-cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers() | Set the authentication data in the object, and if load is True
(default is True) it also retrieves the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: whether to pre-cache the objects.
@return: None | Below is the the instruction that describes the task:
### Input:
Set the authentication data in the object, and if load is True
(default is True) it also retrieves the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: whether to pre-cache the objects.
@return: None
### Response:
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieves the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: whether to pre-cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers() |
def post_process(self, group, event, is_new, is_sample, **kwargs):
"""
Process error.
"""
if not self.is_configured(group.project):
return
host = self.get_option('server_host', group.project)
port = int(self.get_option('server_port', group.project))
prefix = self.get_option('prefix', group.project)
hostname = self.get_option('hostname', group.project) or socket.gethostname()
resolve_age = group.project.get_option('sentry:resolve_age', None)
now = int(time.time())
template = '%s.%%s[%s]' % (prefix, group.project.slug)
level = group.get_level_display()
label = template % level
groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)
if resolve_age:
oldest = timezone.now() - timedelta(hours=int(resolve_age))
groups = groups.filter(last_seen__gt=oldest)
num_errors = groups.filter(level=group.level).count()
metric = Metric(hostname, label, num_errors, now)
log.info('will send %s=%s to zabbix', label, num_errors)
send_to_zabbix([metric], host, port) | Process error. | Below is the the instruction that describes the task:
### Input:
Process error.
### Response:
def post_process(self, group, event, is_new, is_sample, **kwargs):
"""
Process error.
"""
if not self.is_configured(group.project):
return
host = self.get_option('server_host', group.project)
port = int(self.get_option('server_port', group.project))
prefix = self.get_option('prefix', group.project)
hostname = self.get_option('hostname', group.project) or socket.gethostname()
resolve_age = group.project.get_option('sentry:resolve_age', None)
now = int(time.time())
template = '%s.%%s[%s]' % (prefix, group.project.slug)
level = group.get_level_display()
label = template % level
groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)
if resolve_age:
oldest = timezone.now() - timedelta(hours=int(resolve_age))
groups = groups.filter(last_seen__gt=oldest)
num_errors = groups.filter(level=group.level).count()
metric = Metric(hostname, label, num_errors, now)
log.info('will send %s=%s to zabbix', label, num_errors)
send_to_zabbix([metric], host, port) |
def create_quantiles(items: Sequence, lower_bound, upper_bound):
"""Create quantile start and end boundaries."""
interval = (upper_bound - lower_bound) / len(items)
quantiles = ((g, (x - interval, x)) for g, x in
zip(items, accumulate(repeat(interval, len(items)))))
return quantiles | Create quantile start and end boundaries. | Below is the the instruction that describes the task:
### Input:
Create quantile start and end boundaries.
### Response:
def create_quantiles(items: Sequence, lower_bound, upper_bound):
"""Create quantile start and end boundaries."""
interval = (upper_bound - lower_bound) / len(items)
quantiles = ((g, (x - interval, x)) for g, x in
zip(items, accumulate(repeat(interval, len(items)))))
return quantiles |
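A quick check of the boundaries create_quantiles produces for three groups over the range [0, 30).
for group, (lower, upper) in create_quantiles(["a", "b", "c"], 0, 30):
    print(group, lower, upper)
# a 0.0 10.0
# b 10.0 20.0
# c 20.0 30.0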
def GameTypeEnum(ctx):
"""Game Type Enumeration."""
return Enum(
ctx,
RM=0,
Regicide=1,
DM=2,
Scenario=3,
Campaign=4,
KingOfTheHill=5,
WonderRace=6,
DefendTheWonder=7,
TurboRandom=8
) | Game Type Enumeration. | Below is the the instruction that describes the task:
### Input:
Game Type Enumeration.
### Response:
def GameTypeEnum(ctx):
"""Game Type Enumeration."""
return Enum(
ctx,
RM=0,
Regicide=1,
DM=2,
Scenario=3,
Campaign=4,
KingOfTheHill=5,
WonderRace=6,
DefendTheWonder=7,
TurboRandom=8
) |
def get_pending_enrollment_message(cls, pending_users, enrolled_in):
"""
Create message for the users who were enrolled in a course or program.
Args:
pending_users: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment
enrolled_in (str): A string identifier for the course or program the pending users were linked to
Returns:
tuple: A 2-tuple containing a message type and message text
"""
pending_emails = [pending_user.user_email for pending_user in pending_users]
return (
'warning',
_(
"The following learners do not have an account on "
"{platform_name}. They have not been enrolled in "
"{enrolled_in}. When these learners create an account, they will "
"be enrolled automatically: {pending_email_list}"
).format(
platform_name=settings.PLATFORM_NAME,
enrolled_in=enrolled_in,
pending_email_list=', '.join(pending_emails),
)
) | Create message for the users who were enrolled in a course or program.
Args:
pending_users: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment
enrolled_in (str): A string identifier for the course or program the pending users were linked to
Returns:
tuple: A 2-tuple containing a message type and message text | Below is the the instruction that describes the task:
### Input:
Create message for the users who were enrolled in a course or program.
Args:
pending_users: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment
enrolled_in (str): A string identifier for the course or program the pending users were linked to
Returns:
tuple: A 2-tuple containing a message type and message text
### Response:
def get_pending_enrollment_message(cls, pending_users, enrolled_in):
"""
Create message for the users who were enrolled in a course or program.
Args:
pending_users: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment
enrolled_in (str): A string identifier for the course or program the pending users were linked to
Returns:
tuple: A 2-tuple containing a message type and message text
"""
pending_emails = [pending_user.user_email for pending_user in pending_users]
return (
'warning',
_(
"The following learners do not have an account on "
"{platform_name}. They have not been enrolled in "
"{enrolled_in}. When these learners create an account, they will "
"be enrolled automatically: {pending_email_list}"
).format(
platform_name=settings.PLATFORM_NAME,
enrolled_in=enrolled_in,
pending_email_list=', '.join(pending_emails),
)
) |
def resolve_path(root, path):
"""Resolve a rfc 6901 path, returning the parent and the last path part."""
path = parse_path(path)
parent = root
for part in path[:-1]:
parent = get_child(parent, rfc_6901_replace(part))
return (parent, rfc_6901_replace(path[-1])) | Resolve a rfc 6901 path, returning the parent and the last path part. | Below is the the instruction that describes the task:
### Input:
Resolve a rfc 6901 path, returning the parent and the last path part.
### Response:
def resolve_path(root, path):
"""Resolve a rfc 6901 path, returning the parent and the last path part."""
path = parse_path(path)
parent = root
for part in path[:-1]:
parent = get_child(parent, rfc_6901_replace(part))
return (parent, rfc_6901_replace(path[-1])) |
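Because the module's parse_path, get_child and rfc_6901_replace helpers are not shown here, the following is only a behavioural sketch of RFC 6901 resolution over a plain dict/list document; demo_resolve is a made-up stand-in, not the library's code.
doc = {"servers": [{"name": "web-1"}, {"name": "web-2"}], "a/b": 1}

def demo_resolve(root, pointer):
    # Unescape ~1 before ~0, as RFC 6901 requires.
    parts = [p.replace("~1", "/").replace("~0", "~") for p in pointer.lstrip("/").split("/")]
    parent = root
    for part in parts[:-1]:
        parent = parent[int(part)] if isinstance(parent, list) else parent[part]
    return parent, parts[-1]

print(demo_resolve(doc, "/servers/1/name"))  # ({'name': 'web-2'}, 'name')
print(demo_resolve(doc, "/a~1b"))            # (the whole document, 'a/b')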
def _Close(self):
"""Closes the file-like object."""
super(VHDIFile, self)._Close()
for vhdi_file in self._parent_vhdi_files:
vhdi_file.close()
for file_object in self._sub_file_objects:
file_object.close()
self._parent_vhdi_files = []
self._sub_file_objects = [] | Closes the file-like object. | Below is the the instruction that describes the task:
### Input:
Closes the file-like object.
### Response:
def _Close(self):
"""Closes the file-like object."""
super(VHDIFile, self)._Close()
for vhdi_file in self._parent_vhdi_files:
vhdi_file.close()
for file_object in self._sub_file_objects:
file_object.close()
self._parent_vhdi_files = []
self._sub_file_objects = [] |
def run(self):
"""Fetch remote code."""
link = self.content[0]
try:
r = requests.get(link)
r.raise_for_status()
self.content = [r.text]
return super(RemoteCodeBlock, self).run()
except Exception:
document = self.state.document
err = 'Unable to resolve ' + link
return [document.reporter.warning(str(err), line=self.lineno)] | Fetch remote code. | Below is the the instruction that describes the task:
### Input:
Fetch remote code.
### Response:
def run(self):
"""Fetch remote code."""
link = self.content[0]
try:
r = requests.get(link)
r.raise_for_status()
self.content = [r.text]
return super(RemoteCodeBlock, self).run()
except Exception:
document = self.state.document
err = 'Unable to resolve ' + link
return [document.reporter.warning(str(err), line=self.lineno)] |
def run_normalization(self):
"""
Run the normalization procedures
"""
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file)) | Run the normalization procedures | Below is the the instruction that describes the task:
### Input:
Run the normalization procedures
### Response:
def run_normalization(self):
"""
Run the normalization procedures
"""
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file)) |
def step_forward(self):
"""Updates state variables with computed tendencies.
Calls the :func:`compute` method to get current tendencies for all
process states. These tendencies are multiplied by the timestep and added to the state
variables, updating all model states.
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> # checking time step counter
>>> model.time['steps']
0
>>> # stepping the model forward
>>> model.step_forward()
>>> # step counter increased
>>> model.time['steps']
1
"""
tenddict = self.compute()
# Total tendency is applied as an explicit forward timestep
# (already accounting properly for order of operations in compute() )
for varname, tend in tenddict.items():
self.state[varname] += tend * self.timestep
# Update all time counters for this and all subprocesses in the tree
# Also pass diagnostics up the process tree
for name, proc, level in walk.walk_processes(self, ignoreFlag=True):
if proc.time['active_now']:
proc._update_time() | Updates state variables with computed tendencies.
Calls the :func:`compute` method to get current tendencies for all
process states. These tendencies are multiplied by the timestep and added to the state
variables, updating all model states.
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> # checking time step counter
>>> model.time['steps']
0
>>> # stepping the model forward
>>> model.step_forward()
>>> # step counter increased
>>> model.time['steps']
1 | Below is the the instruction that describes the task:
### Input:
Updates state variables with computed tendencies.
Calls the :func:`compute` method to get current tendencies for all
process states. These tendencies are multiplied by the timestep and added to the state
variables, updating all model states.
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> # checking time step counter
>>> model.time['steps']
0
>>> # stepping the model forward
>>> model.step_forward()
>>> # step counter increased
>>> model.time['steps']
1
### Response:
def step_forward(self):
"""Updates state variables with computed tendencies.
Calls the :func:`compute` method to get current tendencies for all
process states. Multiplied with the timestep and added up to the state
variables is updating all model states.
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> # checking time step counter
>>> model.time['steps']
0
>>> # stepping the model forward
>>> model.step_forward()
>>> # step counter increased
>>> model.time['steps']
1
"""
tenddict = self.compute()
# Total tendency is applied as an explicit forward timestep
# (already accounting properly for order of operations in compute() )
for varname, tend in tenddict.items():
self.state[varname] += tend * self.timestep
# Update all time counters for this and all subprocesses in the tree
# Also pass diagnostics up the process tree
for name, proc, level in walk.walk_processes(self, ignoreFlag=True):
if proc.time['active_now']:
proc._update_time() |
def on_touch_move(self, touch):
"""Move the scrollbar to the touch, and update my ``scroll``
accordingly.
"""
if not self.scrolling or 'bar' not in self.ids:
touch.ungrab(self)
return
touch.push()
touch.apply_transform_2d(self.parent.to_local)
touch.apply_transform_2d(self.to_local)
if self.orientation == 'horizontal':
hint_right_of_bar = (touch.x - self.ids.bar.x) / self.width
hint_correction = hint_right_of_bar - self._start_bar_touch_hint[0]
self.scroll += hint_correction
else: # self.orientation == 'vertical'
hint_above_bar = (touch.y - self.ids.bar.y) / self.height
hint_correction = hint_above_bar - self._start_bar_touch_hint[1]
self.scroll += hint_correction
touch.pop() | Move the scrollbar to the touch, and update my ``scroll``
accordingly. | Below is the the instruction that describes the task:
### Input:
Move the scrollbar to the touch, and update my ``scroll``
accordingly.
### Response:
def on_touch_move(self, touch):
"""Move the scrollbar to the touch, and update my ``scroll``
accordingly.
"""
if not self.scrolling or 'bar' not in self.ids:
touch.ungrab(self)
return
touch.push()
touch.apply_transform_2d(self.parent.to_local)
touch.apply_transform_2d(self.to_local)
if self.orientation == 'horizontal':
hint_right_of_bar = (touch.x - self.ids.bar.x) / self.width
hint_correction = hint_right_of_bar - self._start_bar_touch_hint[0]
self.scroll += hint_correction
else: # self.orientation == 'vertical'
hint_above_bar = (touch.y - self.ids.bar.y) / self.height
hint_correction = hint_above_bar - self._start_bar_touch_hint[1]
self.scroll += hint_correction
touch.pop() |
def find(names, dirs, file_ext):
"""
Iterating a set of dirs under the static root, this method tries to find
a file named like one of the names and file ext passed, and returns the
storage path to the first file it encounters.
Usage this method makes it possible to override static files (such as
icon sets) in a similar way like templates in different locations can
override others that have the same file name.
"""
if not isinstance(names, (list, tuple)):
names = (names,)
for dir_name in dirs:
for name in names:
path = os.path.join(dir_name, name + file_ext)
if not path in EXISTING_PATHS:
# check on file system, then cache
EXISTING_PATHS[path] = STATIC_STORAGE.exists(path)
if EXISTING_PATHS[path]:
return path | Iterating a set of dirs under the static root, this method tries to find
a file named like one of the names and file ext passed, and returns the
storage path to the first file it encounters.
Usage this method makes it possible to override static files (such as
icon sets) in a similar way like templates in different locations can
override others that have the same file name. | Below is the the instruction that describes the task:
### Input:
Iterating a set of dirs under the static root, this method tries to find
a file named like one of the names and file ext passed, and returns the
storage path to the first file it encounters.
Usage this method makes it possible to override static files (such as
icon sets) in a similar way like templates in different locations can
override others that have the same file name.
### Response:
def find(names, dirs, file_ext):
"""
Iterating a set of dirs under the static root, this method tries to find
a file named like one of the names and file ext passed, and returns the
storage path to the first file it encounters.
Usage this method makes it possible to override static files (such as
icon sets) in a similar way like templates in different locations can
override others that have the same file name.
"""
if not isinstance(names, (list, tuple)):
names = (names,)
for dir_name in dirs:
for name in names:
path = os.path.join(dir_name, name + file_ext)
if not path in EXISTING_PATHS:
# check on file system, then cache
EXISTING_PATHS[path] = STATIC_STORAGE.exists(path)
if EXISTING_PATHS[path]:
return path |
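A usage sketch with made-up directory and icon names, illustrating the override order (earlier dirs win because dirs is the outer loop):

# look for "user.svg" or "person.svg" under the theme dirs, first hit wins
icon_path = find(
    names=['user', 'person'],
    dirs=['icons/custom', 'icons/dark', 'icons/default'],  # assumed search order
    file_ext='.svg',
)
if icon_path is None:
    icon_path = 'icons/default/fallback.svg'  # nothing matched anywhere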
def qubo_to_ising(Q, offset=0.0):
"""Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem.
>>> import dimod
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5) # doctest: +SKIP
({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75)
"""
h = {}
J = {}
linear_offset = 0.0
quadratic_offset = 0.0
for (u, v), bias in iteritems(Q):
if u == v:
if u in h:
h[u] += .5 * bias
else:
h[u] = .5 * bias
linear_offset += bias
else:
if bias != 0.0:
J[(u, v)] = .25 * bias
if u in h:
h[u] += .25 * bias
else:
h[u] = .25 * bias
if v in h:
h[v] += .25 * bias
else:
h[v] = .25 * bias
quadratic_offset += bias
offset += .5 * linear_offset + .25 * quadratic_offset
return h, J, offset | Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem.
>>> import dimod
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5) # doctest: +SKIP
({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75) | Below is the the instruction that describes the task:
### Input:
Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem.
>>> import dimod
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5) # doctest: +SKIP
({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75)
### Response:
def qubo_to_ising(Q, offset=0.0):
"""Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem.
>>> import dimod
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5) # doctest: +SKIP
({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75)
"""
h = {}
J = {}
linear_offset = 0.0
quadratic_offset = 0.0
for (u, v), bias in iteritems(Q):
if u == v:
if u in h:
h[u] += .5 * bias
else:
h[u] = .5 * bias
linear_offset += bias
else:
if bias != 0.0:
J[(u, v)] = .25 * bias
if u in h:
h[u] += .25 * bias
else:
h[u] = .25 * bias
if v in h:
h[v] += .25 * bias
else:
h[v] = .25 * bias
quadratic_offset += bias
offset += .5 * linear_offset + .25 * quadratic_offset
return h, J, offset |
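A small numerical check of the energy identity from the docstring on the two-variable example; note the returned offset already absorbs the 0.5 passed in, so the Ising side is compared against 0.5 + x'Qx:

from itertools import product

Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
h, J, offset = qubo_to_ising(Q, 0.5)

for x1, x2 in product((0, 1), repeat=2):
    x = {1: x1, 2: x2}
    s = {k: 2 * v - 1 for k, v in x.items()}          # map 0/1 onto -1/+1 spins
    qubo_energy = 0.5 + sum(b * x[u] * x[v] for (u, v), b in Q.items())
    ising_energy = offset + sum(h[k] * s[k] for k in h) + \
        sum(b * s[u] * s[v] for (u, v), b in J.items())
    assert abs(qubo_energy - ising_energy) < 1e-9     # energies agree on every assignment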
def get_config(self):
"""
Return a formatted text with main configuration parameters.
"""
# Create a dummy report object if necessary
channels = [sect.rsplit('_')[0] for sect in self.config.sections(suffix='_channel')]
channels.sort()
disabled_apps = [app for app in self._config_apps.keys() if app not in self._apps]
return u''.join([
u"\n--- %s configuration ---" % __package__,
u"\nConfiguration file: %s" % self.config.cfgfile,
u"\nConfiguration directory: %s" % self.confdir,
u"\nConfigured applications: %s" % ', '.join(self._config_apps.keys()),
u"\nDisabled applications: %s" % ', '.join(disabled_apps) if disabled_apps else '',
u"\nFilter fields: %s" % ', '.join(self.config.options('fields')),
u"\nOutput channels: %s" % ', '.join(channels) if channels else u'No channels defined',
u"\nReports: %s\n" % ', '.join(
[section[:-7] for section in self.config.sections(suffix='_report')]
),
''
]) | Return a formatted text with main configuration parameters. | Below is the the instruction that describes the task:
### Input:
Return a formatted text with main configuration parameters.
### Response:
def get_config(self):
"""
Return a formatted text with main configuration parameters.
"""
# Create a dummy report object if necessary
channels = [sect.rsplit('_')[0] for sect in self.config.sections(suffix='_channel')]
channels.sort()
disabled_apps = [app for app in self._config_apps.keys() if app not in self._apps]
return u''.join([
u"\n--- %s configuration ---" % __package__,
u"\nConfiguration file: %s" % self.config.cfgfile,
u"\nConfiguration directory: %s" % self.confdir,
u"\nConfigured applications: %s" % ', '.join(self._config_apps.keys()),
u"\nDisabled applications: %s" % ', '.join(disabled_apps) if disabled_apps else '',
u"\nFilter fields: %s" % ', '.join(self.config.options('fields')),
u"\nOutput channels: %s" % ', '.join(channels) if channels else u'No channels defined',
u"\nReports: %s\n" % ', '.join(
[section[:-7] for section in self.config.sections(suffix='_report')]
),
''
]) |
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
Image.fromarray(img).save(f, format='PNG') | Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If the number of file names in this list is less than the number of images in
the minibatch, then only the first len(filenames) images will be saved.
output_dir: directory where to save images | Below is the the instruction that describes the task:
### Input:
Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
### Response:
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
Image.fromarray(img).save(f, format='PNG') |
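The rescaling step can be checked in isolation without TensorFlow; a minimal sketch of the [-1, 1] to [0, 255] mapping on a dummy image:

import numpy as np

# dummy "inception-normalized" image spanning the full [-1, 1] range
img = np.linspace(-1.0, 1.0, 16 * 16 * 3, dtype=np.float32).reshape(16, 16, 3)

# same transform as in save_images: [-1, 1] -> [0, 1] -> [0, 255]
rescaled = (((img + 1.0) * 0.5) * 255.0).astype(np.uint8)
assert rescaled.min() == 0 and rescaled.max() == 255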
def fit(self, x, y):
"""Train the model.
Args:
x_tr (pd.DataFrame): CEPC format dataframe containing the pairs
y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs
"""
train = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1]) for idx, row in x.iterrows()]),
np.array([self.featurize_row(row.iloc[1],
row.iloc[0]) for idx, row in x.iterrows()])))
labels = np.vstack((y, -y)).ravel()
verbose = 1 if self.verbose else 0
self.clf = CLF(verbose=verbose,
min_samples_leaf=self.L,
n_estimators=self.E,
max_depth=self.max_depth,
n_jobs=self.n_jobs).fit(train, labels) | Train the model.
Args:
x_tr (pd.DataFrame): CEPC format dataframe containing the pairs
y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs | Below is the the instruction that describes the task:
### Input:
Train the model.
Args:
x_tr (pd.DataFrame): CEPC format dataframe containing the pairs
y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs
### Response:
def fit(self, x, y):
"""Train the model.
Args:
x_tr (pd.DataFrame): CEPC format dataframe containing the pairs
y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs
"""
train = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1]) for idx, row in x.iterrows()]),
np.array([self.featurize_row(row.iloc[1],
row.iloc[0]) for idx, row in x.iterrows()])))
labels = np.vstack((y, -y)).ravel()
verbose = 1 if self.verbose else 0
self.clf = CLF(verbose=verbose,
min_samples_leaf=self.L,
n_estimators=self.E,
max_depth=self.max_depth,
n_jobs=self.n_jobs).fit(train, labels) |
def _compute_weight(powers, wg):
"""Return the weight corresponding to given powers."""
# split
pow1 = (powers[0], 0)
pow2 = (0, powers[1])
cal1 = _compute_value(pow1, wg)
cal2 = _compute_value(pow2, wg)
return cal1 * cal2 | Return the weight corresponding to given powers. | Below is the the instruction that describes the task:
### Input:
Return the weight corresponding to given powers.
### Response:
def _compute_weight(powers, wg):
"""Return the weight corresponding to given powers."""
# split
pow1 = (powers[0], 0)
pow2 = (0, powers[1])
cal1 = _compute_value(pow1, wg)
cal2 = _compute_value(pow2, wg)
return cal1 * cal2 |
def flatten(iterable, maps=None, unique=False) -> list:
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x | Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz'] | Below is the the instruction that describes the task:
### Input:
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
### Response:
def flatten(iterable, maps=None, unique=False) -> list:
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x |
def __expand_meta_datas(meta_datas, meta_datas_expanded):
""" expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
"""
if isinstance(meta_datas, dict):
meta_datas_expanded.append(meta_datas)
elif isinstance(meta_datas, list):
for meta_data in meta_datas:
__expand_meta_datas(meta_data, meta_datas_expanded) | expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3] | Below is the the instruction that describes the task:
### Input:
expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
### Response:
def __expand_meta_datas(meta_datas, meta_datas_expanded):
""" expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
"""
if isinstance(meta_datas, dict):
meta_datas_expanded.append(meta_datas)
elif isinstance(meta_datas, list):
for meta_data in meta_datas:
__expand_meta_datas(meta_data, meta_datas_expanded) |
def add_tcp_callback(self, port, callback, threaded_callback=False):
"""
Adds a TCP socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
if not callback:
raise AttributeError("No callback")
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind((_TCP_SOCKET_HOST, port))
serversocket.listen(1)
serversocket.setblocking(0)
self._epoll.register(serversocket.fileno(), select.EPOLLIN)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
self._tcp_server_sockets[serversocket.fileno()] = (serversocket, cb)
debug("Socket server started at port %s and callback added." % port) | Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``. | Below is the the instruction that describes the task:
### Input:
Adds a TCP socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
### Response:
def add_tcp_callback(self, port, callback, threaded_callback=False):
"""
Adds a TCP socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
if not callback:
raise AttributeError("No callback")
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind((_TCP_SOCKET_HOST, port))
serversocket.listen(1)
serversocket.setblocking(0)
self._epoll.register(serversocket.fileno(), select.EPOLLIN)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
self._tcp_server_sockets[serversocket.fileno()] = (serversocket, cb)
debug("Socket server started at port %s and callback added." % port) |
def _remove_node_or_leaf(self, instance, recursive=False):
"""Removes a single node from the tree.
Only from RAM not from hdf5 file!
:param instance: The node to be deleted
:param recursive: If group nodes with children should be deleted
"""
full_name = instance.v_full_name
split_name = deque(full_name.split('.'))
self._remove_along_branch(self._root_instance, split_name, recursive) | Removes a single node from the tree.
Only from RAM not from hdf5 file!
:param instance: The node to be deleted
:param recursive: If group nodes with children should be deleted | Below is the the instruction that describes the task:
### Input:
Removes a single node from the tree.
Only from RAM not from hdf5 file!
:param instance: The node to be deleted
:param recursive: If group nodes with children should be deleted
### Response:
def _remove_node_or_leaf(self, instance, recursive=False):
"""Removes a single node from the tree.
Only from RAM not from hdf5 file!
:param instance: The node to be deleted
:param recursive: If group nodes with children should be deleted
"""
full_name = instance.v_full_name
split_name = deque(full_name.split('.'))
self._remove_along_branch(self._root_instance, split_name, recursive) |
def compute_von_neumann_entropy(data, t_max=100):
"""
Determines the Von Neumann entropy of data
at varying matrix powers. The user should select a value of t
around the "knee" of the entropy curve.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of t
Examples
--------
>>> import numpy as np
>>> import phate
>>> X = np.eye(10)
>>> X[0,0] = 5
>>> X[3,2] = 4
>>> h = phate.vne.compute_von_neumann_entropy(X)
>>> phate.vne.find_knee_point(h)
23
"""
_, eigenvalues, _ = svd(data)
entropy = []
eigenvalues_t = np.copy(eigenvalues)
for _ in range(t_max):
prob = eigenvalues_t / np.sum(eigenvalues_t)
prob = prob + np.finfo(float).eps
entropy.append(-np.sum(prob * np.log(prob)))
eigenvalues_t = eigenvalues_t * eigenvalues
entropy = np.array(entropy)
return np.array(entropy) | Determines the Von Neumann entropy of data
at varying matrix powers. The user should select a value of t
around the "knee" of the entropy curve.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of t
Examples
--------
>>> import numpy as np
>>> import phate
>>> X = np.eye(10)
>>> X[0,0] = 5
>>> X[3,2] = 4
>>> h = phate.vne.compute_von_neumann_entropy(X)
>>> phate.vne.find_knee_point(h)
23 | Below is the the instruction that describes the task:
### Input:
Determines the Von Neumann entropy of data
at varying matrix powers. The user should select a value of t
around the "knee" of the entropy curve.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of t
Examples
--------
>>> import numpy as np
>>> import phate
>>> X = np.eye(10)
>>> X[0,0] = 5
>>> X[3,2] = 4
>>> h = phate.vne.compute_von_neumann_entropy(X)
>>> phate.vne.find_knee_point(h)
23
### Response:
def compute_von_neumann_entropy(data, t_max=100):
"""
Determines the Von Neumann entropy of data
at varying matrix powers. The user should select a value of t
around the "knee" of the entropy curve.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of t
Examples
--------
>>> import numpy as np
>>> import phate
>>> X = np.eye(10)
>>> X[0,0] = 5
>>> X[3,2] = 4
>>> h = phate.vne.compute_von_neumann_entropy(X)
>>> phate.vne.find_knee_point(h)
23
"""
_, eigenvalues, _ = svd(data)
entropy = []
eigenvalues_t = np.copy(eigenvalues)
for _ in range(t_max):
prob = eigenvalues_t / np.sum(eigenvalues_t)
prob = prob + np.finfo(float).eps
entropy.append(-np.sum(prob * np.log(prob)))
eigenvalues_t = eigenvalues_t * eigenvalues
entropy = np.array(entropy)
return np.array(entropy) |
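The per-power entropy can also be computed directly from a singular-value spectrum; a standalone sketch of the same quantity at a single diffusion power t (NumPy only):

import numpy as np

def vne_at_t(singular_values, t):
    spectrum = np.asarray(singular_values, dtype=float) ** t   # spectrum at power t
    prob = spectrum / spectrum.sum() + np.finfo(float).eps
    return -np.sum(prob * np.log(prob))

vals = np.array([5.0, 2.0, 1.0, 0.5])
print([round(vne_at_t(vals, t), 3) for t in (1, 2, 4, 8)])     # entropy shrinks as t grows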
def anat_info(suffix, metadata, img, config):
"""
Generate a paragraph describing T1- and T2-weighted structural scans.
Parameters
----------
suffix : :obj:`str`
T1 or T2.
metadata : :obj:`dict`
Data from the json file associated with the scan, in dictionary
form.
img : :obj:`nibabel.Nifti1Image`
The nifti image of the scan.
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
Returns
-------
desc : :obj:`str`
A description of the scan's acquisition information.
"""
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
seqs, variants = get_seqstr(config, metadata)
if 'EchoTime' in metadata.keys():
te = num_to_str(metadata['EchoTime']*1000)
else:
te = 'UNKNOWN'
desc = '''
{suffix} {variants} {seqs} structural MRI data were collected
({n_slices} slices; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms}; voxel size={vs}mm).
'''.format(suffix=suffix,
variants=variants,
seqs=seqs,
n_slices=n_slices,
tr=num_to_str(metadata['RepetitionTime']*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str,
)
desc = desc.replace('\n', ' ').lstrip()
while ' ' in desc:
desc = desc.replace(' ', ' ')
return desc | Generate a paragraph describing T1- and T2-weighted structural scans.
Parameters
----------
suffix : :obj:`str`
T1 or T2.
metadata : :obj:`dict`
Data from the json file associated with the scan, in dictionary
form.
img : :obj:`nibabel.Nifti1Image`
The nifti image of the scan.
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
Returns
-------
desc : :obj:`str`
A description of the scan's acquisition information. | Below is the the instruction that describes the task:
### Input:
Generate a paragraph describing T1- and T2-weighted structural scans.
Parameters
----------
suffix : :obj:`str`
T1 or T2.
metadata : :obj:`dict`
Data from the json file associated with the scan, in dictionary
form.
img : :obj:`nibabel.Nifti1Image`
The nifti image of the scan.
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
Returns
-------
desc : :obj:`str`
A description of the scan's acquisition information.
### Response:
def anat_info(suffix, metadata, img, config):
"""
Generate a paragraph describing T1- and T2-weighted structural scans.
Parameters
----------
suffix : :obj:`str`
T1 or T2.
metadata : :obj:`dict`
Data from the json file associated with the scan, in dictionary
form.
img : :obj:`nibabel.Nifti1Image`
The nifti image of the scan.
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
Returns
-------
desc : :obj:`str`
A description of the scan's acquisition information.
"""
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
seqs, variants = get_seqstr(config, metadata)
if 'EchoTime' in metadata.keys():
te = num_to_str(metadata['EchoTime']*1000)
else:
te = 'UNKNOWN'
desc = '''
{suffix} {variants} {seqs} structural MRI data were collected
({n_slices} slices; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms}; voxel size={vs}mm).
'''.format(suffix=suffix,
variants=variants,
seqs=seqs,
n_slices=n_slices,
tr=num_to_str(metadata['RepetitionTime']*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str,
)
desc = desc.replace('\n', ' ').lstrip()
while ' ' in desc:
desc = desc.replace(' ', ' ')
return desc |
def copy(self):
"""
make an identical copy of the character
"""
return Character(self.name, self.race,self.ch_class, self.stats, self.skills, self.story, self.inventory) | make an identical copy of the character | Below is the the instruction that describes the task:
### Input:
make an identical copy of the character
### Response:
def copy(self):
"""
make an identical copy of the character
"""
return Character(self.name, self.race,self.ch_class, self.stats, self.skills, self.story, self.inventory) |
def process_soundcloud(vargs):
"""
Main SoundCloud path.
"""
artist_url = vargs['artist_url']
track_permalink = vargs['track']
keep_previews = vargs['keep']
folders = vargs['folders']
id3_extras = {}
one_track = False
likes = False
client = get_client()
if 'soundcloud' not in artist_url.lower():
if vargs['group']:
artist_url = 'https://soundcloud.com/groups/' + artist_url.lower()
elif len(track_permalink) > 0:
one_track = True
track_url = 'https://soundcloud.com/' + artist_url.lower() + '/' + track_permalink.lower()
else:
artist_url = 'https://soundcloud.com/' + artist_url.lower()
if vargs['likes'] or 'likes' in artist_url.lower():
likes = True
if 'likes' in artist_url.lower():
artist_url = artist_url[0:artist_url.find('/likes')]
likes = True
if one_track:
num_tracks = 1
else:
num_tracks = vargs['num_tracks']
try:
if one_track:
resolved = client.get('/resolve', url=track_url, limit=200)
elif likes:
userId = str(client.get('/resolve', url=artist_url).id)
resolved = client.get('/users/' + userId + '/favorites', limit=200, linked_partitioning=1)
next_href = False
if(hasattr(resolved, 'next_href')):
next_href = resolved.next_href
while (next_href):
resolved2 = requests.get(next_href).json()
if('next_href' in resolved2):
next_href = resolved2['next_href']
else:
next_href = False
resolved2 = soundcloud.resource.ResourceList(resolved2['collection'])
resolved.collection.extend(resolved2)
resolved = resolved.collection
else:
resolved = client.get('/resolve', url=artist_url, limit=200)
except Exception as e: # HTTPError?
# SoundScrape is trying to prevent us from downloading this.
# We're going to have to stop trusting the API/client and
# do all our own scraping. Boo.
if '404 Client Error' in str(e):
puts(colored.red("Problem downloading [404]: ") + colored.white("Item Not Found"))
return None
message = str(e)
item_id = message.rsplit('/', 1)[-1].split('.json')[0].split('?client_id')[0]
hard_track_url = get_hard_track_url(item_id)
track_data = get_soundcloud_data(artist_url)
puts_safe(colored.green("Scraping") + colored.white(": " + track_data['title']))
filenames = []
filename = sanitize_filename(track_data['artist'] + ' - ' + track_data['title'] + '.mp3')
if folders:
name_path = join(vargs['path'], track_data['artist'])
if not exists(name_path):
mkdir(name_path)
filename = join(name_path, filename)
else:
filename = join(vargs['path'], filename)
if exists(filename):
puts_safe(colored.yellow("Track already downloaded: ") + colored.white(track_data['title']))
return None
filename = download_file(hard_track_url, filename)
tagged = tag_file(filename,
artist=track_data['artist'],
title=track_data['title'],
year='2018',
genre='',
album='',
artwork_url='')
if not tagged:
wav_filename = filename[:-3] + 'wav'
os.rename(filename, wav_filename)
filename = wav_filename
filenames.append(filename)
else:
aggressive = False
# This is is likely a 'likes' page.
if not hasattr(resolved, 'kind'):
tracks = resolved
else:
if resolved.kind == 'artist':
artist = resolved
artist_id = str(artist.id)
tracks = client.get('/users/' + artist_id + '/tracks', limit=200)
elif resolved.kind == 'playlist':
id3_extras['album'] = resolved.title
if resolved.tracks != []:
tracks = resolved.tracks
else:
tracks = get_soundcloud_api_playlist_data(resolved.id)['tracks']
tracks = tracks[:num_tracks]
aggressive = True
for track in tracks:
download_track(track, resolved.title, keep_previews, folders, custom_path=vargs['path'])
elif resolved.kind == 'track':
tracks = [resolved]
elif resolved.kind == 'group':
group = resolved
group_id = str(group.id)
tracks = client.get('/groups/' + group_id + '/tracks', limit=200)
else:
artist = resolved
artist_id = str(artist.id)
tracks = client.get('/users/' + artist_id + '/tracks', limit=200)
if tracks == [] and artist.track_count > 0:
aggressive = True
filenames = []
# this might be buggy
data = get_soundcloud_api2_data(artist_id)
for track in data['collection']:
if len(filenames) >= num_tracks:
break
if track['type'] == 'playlist':
track['playlist']['tracks'] = track['playlist']['tracks'][:num_tracks]
for playlist_track in track['playlist']['tracks']:
album_name = track['playlist']['title']
filename = download_track(playlist_track, album_name, keep_previews, folders, filenames, custom_path=vargs['path'])
if filename:
filenames.append(filename)
else:
d_track = track['track']
filename = download_track(d_track, custom_path=vargs['path'])
if filename:
filenames.append(filename)
if not aggressive:
filenames = download_tracks(client, tracks, num_tracks, vargs['downloadable'], vargs['folders'], vargs['path'],
id3_extras=id3_extras)
if vargs['open']:
open_files(filenames) | Main SoundCloud path. | Below is the the instruction that describes the task:
### Input:
Main SoundCloud path.
### Response:
def process_soundcloud(vargs):
"""
Main SoundCloud path.
"""
artist_url = vargs['artist_url']
track_permalink = vargs['track']
keep_previews = vargs['keep']
folders = vargs['folders']
id3_extras = {}
one_track = False
likes = False
client = get_client()
if 'soundcloud' not in artist_url.lower():
if vargs['group']:
artist_url = 'https://soundcloud.com/groups/' + artist_url.lower()
elif len(track_permalink) > 0:
one_track = True
track_url = 'https://soundcloud.com/' + artist_url.lower() + '/' + track_permalink.lower()
else:
artist_url = 'https://soundcloud.com/' + artist_url.lower()
if vargs['likes'] or 'likes' in artist_url.lower():
likes = True
if 'likes' in artist_url.lower():
artist_url = artist_url[0:artist_url.find('/likes')]
likes = True
if one_track:
num_tracks = 1
else:
num_tracks = vargs['num_tracks']
try:
if one_track:
resolved = client.get('/resolve', url=track_url, limit=200)
elif likes:
userId = str(client.get('/resolve', url=artist_url).id)
resolved = client.get('/users/' + userId + '/favorites', limit=200, linked_partitioning=1)
next_href = False
if(hasattr(resolved, 'next_href')):
next_href = resolved.next_href
while (next_href):
resolved2 = requests.get(next_href).json()
if('next_href' in resolved2):
next_href = resolved2['next_href']
else:
next_href = False
resolved2 = soundcloud.resource.ResourceList(resolved2['collection'])
resolved.collection.extend(resolved2)
resolved = resolved.collection
else:
resolved = client.get('/resolve', url=artist_url, limit=200)
except Exception as e: # HTTPError?
# SoundScrape is trying to prevent us from downloading this.
# We're going to have to stop trusting the API/client and
# do all our own scraping. Boo.
if '404 Client Error' in str(e):
puts(colored.red("Problem downloading [404]: ") + colored.white("Item Not Found"))
return None
message = str(e)
item_id = message.rsplit('/', 1)[-1].split('.json')[0].split('?client_id')[0]
hard_track_url = get_hard_track_url(item_id)
track_data = get_soundcloud_data(artist_url)
puts_safe(colored.green("Scraping") + colored.white(": " + track_data['title']))
filenames = []
filename = sanitize_filename(track_data['artist'] + ' - ' + track_data['title'] + '.mp3')
if folders:
name_path = join(vargs['path'], track_data['artist'])
if not exists(name_path):
mkdir(name_path)
filename = join(name_path, filename)
else:
filename = join(vargs['path'], filename)
if exists(filename):
puts_safe(colored.yellow("Track already downloaded: ") + colored.white(track_data['title']))
return None
filename = download_file(hard_track_url, filename)
tagged = tag_file(filename,
artist=track_data['artist'],
title=track_data['title'],
year='2018',
genre='',
album='',
artwork_url='')
if not tagged:
wav_filename = filename[:-3] + 'wav'
os.rename(filename, wav_filename)
filename = wav_filename
filenames.append(filename)
else:
aggressive = False
# This is is likely a 'likes' page.
if not hasattr(resolved, 'kind'):
tracks = resolved
else:
if resolved.kind == 'artist':
artist = resolved
artist_id = str(artist.id)
tracks = client.get('/users/' + artist_id + '/tracks', limit=200)
elif resolved.kind == 'playlist':
id3_extras['album'] = resolved.title
if resolved.tracks != []:
tracks = resolved.tracks
else:
tracks = get_soundcloud_api_playlist_data(resolved.id)['tracks']
tracks = tracks[:num_tracks]
aggressive = True
for track in tracks:
download_track(track, resolved.title, keep_previews, folders, custom_path=vargs['path'])
elif resolved.kind == 'track':
tracks = [resolved]
elif resolved.kind == 'group':
group = resolved
group_id = str(group.id)
tracks = client.get('/groups/' + group_id + '/tracks', limit=200)
else:
artist = resolved
artist_id = str(artist.id)
tracks = client.get('/users/' + artist_id + '/tracks', limit=200)
if tracks == [] and artist.track_count > 0:
aggressive = True
filenames = []
# this might be buggy
data = get_soundcloud_api2_data(artist_id)
for track in data['collection']:
if len(filenames) >= num_tracks:
break
if track['type'] == 'playlist':
track['playlist']['tracks'] = track['playlist']['tracks'][:num_tracks]
for playlist_track in track['playlist']['tracks']:
album_name = track['playlist']['title']
filename = download_track(playlist_track, album_name, keep_previews, folders, filenames, custom_path=vargs['path'])
if filename:
filenames.append(filename)
else:
d_track = track['track']
filename = download_track(d_track, custom_path=vargs['path'])
if filename:
filenames.append(filename)
if not aggressive:
filenames = download_tracks(client, tracks, num_tracks, vargs['downloadable'], vargs['folders'], vargs['path'],
id3_extras=id3_extras)
if vargs['open']:
open_files(filenames) |
def mfccInitFilterBanks(fs, nfft):
"""
Computes the triangular filterbank for MFCC computation
(used in the stFeatureExtraction function before the stMFCC function call)
This function is taken from the scikits.talkbox library (MIT Licence):
https://pypi.python.org/pypi/scikits.talkbox
"""
# filter bank params:
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
numLinFiltTotal = 13
numLogFilt = 27
if fs < 8000:
numLogFilt = 5
# Total number of filters
nFiltTotal = numLinFiltTotal + numLogFilt
# Compute frequency points of the triangle:
freqs = numpy.zeros(nFiltTotal+2)
freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc
freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3)
heights = 2./(freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = numpy.zeros((nFiltTotal, nfft))
nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nFiltTotal):
lowTrFreq = freqs[i]
cenTrFreq = freqs[i+1]
highTrFreq = freqs[i+2]
lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1,
numpy.floor(cenTrFreq * nfft / fs) + 1,
dtype=numpy.int)
lslope = heights[i] / (cenTrFreq - lowTrFreq)
rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1,
numpy.floor(highTrFreq * nfft / fs) + 1,
dtype=numpy.int)
rslope = heights[i] / (highTrFreq - cenTrFreq)
fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
return fbank, freqs | Computes the triangular filterbank for MFCC computation
(used in the stFeatureExtraction function before the stMFCC function call)
This function is taken from the scikits.talkbox library (MIT Licence):
https://pypi.python.org/pypi/scikits.talkbox | Below is the the instruction that describes the task:
### Input:
Computes the triangular filterbank for MFCC computation
(used in the stFeatureExtraction function before the stMFCC function call)
This function is taken from the scikits.talkbox library (MIT Licence):
https://pypi.python.org/pypi/scikits.talkbox
### Response:
def mfccInitFilterBanks(fs, nfft):
"""
Computes the triangular filterbank for MFCC computation
(used in the stFeatureExtraction function before the stMFCC function call)
This function is taken from the scikits.talkbox library (MIT Licence):
https://pypi.python.org/pypi/scikits.talkbox
"""
# filter bank params:
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
numLinFiltTotal = 13
numLogFilt = 27
if fs < 8000:
numLogFilt = 5
# Total number of filters
nFiltTotal = numLinFiltTotal + numLogFilt
# Compute frequency points of the triangle:
freqs = numpy.zeros(nFiltTotal+2)
freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc
freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3)
heights = 2./(freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = numpy.zeros((nFiltTotal, nfft))
nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nFiltTotal):
lowTrFreq = freqs[i]
cenTrFreq = freqs[i+1]
highTrFreq = freqs[i+2]
lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1,
numpy.floor(cenTrFreq * nfft / fs) + 1,
dtype=numpy.int)
lslope = heights[i] / (cenTrFreq - lowTrFreq)
rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1,
numpy.floor(highTrFreq * nfft / fs) + 1,
dtype=numpy.int)
rslope = heights[i] / (highTrFreq - cenTrFreq)
fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
return fbank, freqs |
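A sketch of how the returned filterbank is typically applied to one frame to obtain log filterbank energies, the step that precedes the DCT in MFCC computation (synthetic frame; fs and nfft chosen arbitrarily):

import numpy as np

fs, nfft = 16000, 512
fbank, freqs = mfccInitFilterBanks(fs, nfft)        # (40, 512) filterbank for fs >= 8000

frame = np.random.randn(2 * nfft)                   # synthetic audio frame
X = np.abs(np.fft.fft(frame))[:nfft] / nfft         # magnitude spectrum, first nfft bins

# filterbank energies, floored to avoid log(0)
energies = np.log10(np.maximum(np.dot(fbank, X), 1e-10))
print(energies.shape)                               # (40,)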
def get_gitignore_template(self, name):
"""
:calls: `GET /gitignore/templates/:name <http://developer.github.com/v3/gitignore>`_
:rtype: :class:`github.GitignoreTemplate.GitignoreTemplate`
"""
assert isinstance(name, (str, unicode)), name
headers, attributes = self.__requester.requestJsonAndCheck(
"GET",
"/gitignore/templates/" + name
)
return GitignoreTemplate.GitignoreTemplate(self.__requester, headers, attributes, completed=True) | :calls: `GET /gitignore/templates/:name <http://developer.github.com/v3/gitignore>`_
:rtype: :class:`github.GitignoreTemplate.GitignoreTemplate` | Below is the the instruction that describes the task:
### Input:
:calls: `GET /gitignore/templates/:name <http://developer.github.com/v3/gitignore>`_
:rtype: :class:`github.GitignoreTemplate.GitignoreTemplate`
### Response:
def get_gitignore_template(self, name):
"""
:calls: `GET /gitignore/templates/:name <http://developer.github.com/v3/gitignore>`_
:rtype: :class:`github.GitignoreTemplate.GitignoreTemplate`
"""
assert isinstance(name, (str, unicode)), name
headers, attributes = self.__requester.requestJsonAndCheck(
"GET",
"/gitignore/templates/" + name
)
return GitignoreTemplate.GitignoreTemplate(self.__requester, headers, attributes, completed=True) |
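A usage sketch with PyGithub; anonymous access should be enough for gitignore templates, and the template name 'Python' is just an example:

from github import Github

g = Github()                                   # anonymous client (rate-limited)
template = g.get_gitignore_template('Python')
print(template.name)
print(template.source[:200])                   # first lines of the .gitignore body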
def set(self, time):
"""Sets time in seconds since Epoch
Args:
time (:obj:`float`): time in seconds since Epoch (see time.time())
Returns:
None
"""
self._time = time
self._pb.sec = int(self._time)
self._pb.nsec = int((self._time - self._pb.sec) * 10 ** 9) | Sets time in seconds since Epoch
Args:
time (:obj:`float`): time in seconds since Epoch (see time.time())
Returns:
None | Below is the the instruction that describes the task:
### Input:
Sets time in seconds since Epoch
Args:
time (:obj:`float`): time in seconds since Epoch (see time.time())
Returns:
None
### Response:
def set(self, time):
"""Sets time in seconds since Epoch
Args:
time (:obj:`float`): time in seconds since Epoch (see time.time())
Returns:
None
"""
self._time = time
self._pb.sec = int(self._time)
self._pb.nsec = int((self._time - self._pb.sec) * 10 ** 9) |
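The seconds/nanoseconds split can be illustrated on its own; a tiny sketch of the same arithmetic used above:

import time

t = time.time()                       # e.g. 1700000000.123456
sec = int(t)                          # whole seconds since the Epoch
nsec = int((t - sec) * 10 ** 9)       # fractional part expressed in nanoseconds
assert 0 <= nsec < 10 ** 9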
def check_dependencies():
"""Check external dependecies
Return a tuple with the available generators.
"""
available = []
try:
shell('ebook-convert')
available.append('calibre')
except OSError:
pass
try:
shell('pandoc --help')
available.append('pandoc')
except OSError:
pass
if not available:
sys.exit(error('No generator found, you cannot use md2ebook.'))
check_dependency_epubcheck()
return available | Check external dependencies
Return a tuple with the available generators. | Below is the the instruction that describes the task:
### Input:
Check external dependencies
Return a tuple with the available generators.
### Response:
def check_dependencies():
"""Check external dependecies
Return a tuple with the available generators.
"""
available = []
try:
shell('ebook-convert')
available.append('calibre')
except OSError:
pass
try:
shell('pandoc --help')
available.append('pandoc')
except OSError:
pass
if not available:
sys.exit(error('No generator found, you cannot use md2ebook.'))
check_dependency_epubcheck()
return available |
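The same availability probe can be written without actually spawning the tools; a hedged alternative sketch using shutil.which, with the executable names mirroring the ones checked above:

import shutil

def available_generators():
    found = []
    if shutil.which('ebook-convert'):   # calibre's CLI converter
        found.append('calibre')
    if shutil.which('pandoc'):
        found.append('pandoc')
    return found

print(available_generators())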
def get_subgraph_for_concept_pair(
self, source: str, target: str, cutoff: Optional[int] = None
):
""" Get subgraph comprised of simple paths between the source and the
target.
Args:
source
target
cutoff
"""
paths = nx.all_simple_paths(self, source, target, cutoff=cutoff)
return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths)))) | Get subgraph comprised of simple paths between the source and the
target.
Args:
source
target
cutoff | Below is the the instruction that describes the task:
### Input:
Get subgraph comprised of simple paths between the source and the
target.
Args:
source
target
cutoff
### Response:
def get_subgraph_for_concept_pair(
self, source: str, target: str, cutoff: Optional[int] = None
):
""" Get subgraph comprised of simple paths between the source and the
target.
Args:
source
target
cutoff
"""
paths = nx.all_simple_paths(self, source, target, cutoff=cutoff)
return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths)))) |
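The underlying networkx idiom can be tried on a plain DiGraph; a toy sketch with made-up concept names:

import networkx as nx
from itertools import chain

G = nx.DiGraph()
G.add_edges_from([
    ('rainfall', 'crop_yield'),
    ('rainfall', 'flooding'),
    ('flooding', 'crop_yield'),
    ('crop_yield', 'food_security'),
])

# keep only nodes that lie on some simple path from source to target
paths = nx.all_simple_paths(G, 'rainfall', 'food_security', cutoff=3)
sub = G.subgraph(set(chain.from_iterable(paths)))
print(sorted(sub.nodes()))   # ['crop_yield', 'flooding', 'food_security', 'rainfall']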
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background, stats=None, metrics=("roc_auc", "recall_at_fdr")):
"""Return the best motif per cluster for a clustering results.
The motif can be either the average motif or one of the clustered motifs.
Parameters
----------
single_pwm : str
Filename of motifs.
clus_pwm : str
Filename of motifs.
clusters :
Motif clustering result.
fg_fa : str
Filename of FASTA file.
background : dict
Dictionary for background file names.
stats : dict, optional
If statistics are not supplied they will be computed.
metrics : sequence, optional
Metrics to use for motif evaluation. Default are "roc_auc" and
"recall_at_fdr".
Returns
-------
motifs : list
List of Motif instances.
"""
# combine original and clustered motifs
motifs = read_motifs(single_pwm) + read_motifs(clus_pwm)
motifs = dict([(str(m), m) for m in motifs])
if stats is None: stats = {}  # compute fresh statistics below when none are supplied
# get the statistics for those motifs that were not yet checked
clustered_motifs = []
for clus,singles in clusters:
for motif in set([clus] + singles):
if str(motif) not in stats:
clustered_motifs.append(motifs[str(motif)])
new_stats = {}
for bg, bg_fa in background.items():
for m,s in calc_stats(clustered_motifs, fg_fa, bg_fa).items():
if m not in new_stats:
new_stats[m] = {}
new_stats[m][bg] = s
stats.update(new_stats)
rank = rank_motifs(stats, metrics)
# rank the motifs
best_motifs = []
for clus, singles in clusters:
if len(singles) > 1:
eval_motifs = singles
if clus not in motifs:
eval_motifs.append(clus)
eval_motifs = [motifs[str(e)] for e in eval_motifs]
best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1]
best_motifs.append(best_motif)
else:
best_motifs.append(clus)
for bg in background:
stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles)
best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True)
return best_motifs | Return the best motif per cluster for a clustering results.
The motif can be either the average motif or one of the clustered motifs.
Parameters
----------
single_pwm : str
Filename of motifs.
clus_pwm : str
Filename of motifs.
clusters :
Motif clustering result.
fg_fa : str
Filename of FASTA file.
background : dict
Dictionary for background file names.
stats : dict, optional
If statistics are not supplied they will be computed.
metrics : sequence, optional
Metrics to use for motif evaluation. Default are "roc_auc" and
"recall_at_fdr".
Returns
-------
motifs : list
List of Motif instances. | Below is the the instruction that describes the task:
### Input:
Return the best motif per cluster for a clustering results.
The motif can be either the average motif or one of the clustered motifs.
Parameters
----------
single_pwm : str
Filename of motifs.
clus_pwm : str
Filename of motifs.
clusters :
Motif clustering result.
fg_fa : str
Filename of FASTA file.
background : dict
Dictionary for background file names.
stats : dict, optional
If statistics are not supplied they will be computed.
metrics : sequence, optional
Metrics to use for motif evaluation. Default are "roc_auc" and
"recall_at_fdr".
Returns
-------
motifs : list
List of Motif instances.
### Response:
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background, stats=None, metrics=("roc_auc", "recall_at_fdr")):
"""Return the best motif per cluster for a clustering results.
The motif can be either the average motif or one of the clustered motifs.
Parameters
----------
single_pwm : str
Filename of motifs.
clus_pwm : str
Filename of motifs.
clusters :
Motif clustering result.
fg_fa : str
Filename of FASTA file.
background : dict
Dictionary for background file names.
stats : dict, optional
If statistics are not supplied they will be computed.
metrics : sequence, optional
Metrics to use for motif evaluation. Default are "roc_auc" and
"recall_at_fdr".
Returns
-------
motifs : list
List of Motif instances.
"""
# combine original and clustered motifs
motifs = read_motifs(single_pwm) + read_motifs(clus_pwm)
motifs = dict([(str(m), m) for m in motifs])
if stats is None: stats = {}  # compute fresh statistics below when none are supplied
# get the statistics for those motifs that were not yet checked
clustered_motifs = []
for clus,singles in clusters:
for motif in set([clus] + singles):
if str(motif) not in stats:
clustered_motifs.append(motifs[str(motif)])
new_stats = {}
for bg, bg_fa in background.items():
for m,s in calc_stats(clustered_motifs, fg_fa, bg_fa).items():
if m not in new_stats:
new_stats[m] = {}
new_stats[m][bg] = s
stats.update(new_stats)
rank = rank_motifs(stats, metrics)
# rank the motifs
best_motifs = []
for clus, singles in clusters:
if len(singles) > 1:
eval_motifs = singles
if clus not in motifs:
eval_motifs.append(clus)
eval_motifs = [motifs[str(e)] for e in eval_motifs]
best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1]
best_motifs.append(best_motif)
else:
best_motifs.append(clus)
for bg in background:
stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles)
best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True)
return best_motifs |
def _tool_from_string(name):
""" Takes a string and returns a corresponding `Tool` instance. """
known_tools = sorted(_known_tools.keys())
if name in known_tools:
tool_fn = _known_tools[name]
if isinstance(tool_fn, string_types):
tool_fn = _known_tools[tool_fn]
return tool_fn()
else:
matches, text = difflib.get_close_matches(name.lower(), known_tools), "similar"
if not matches:
matches, text = known_tools, "possible"
raise ValueError("unexpected tool name '%s', %s tools are %s" % (name, text, nice_join(matches))) | Takes a string and returns a corresponding `Tool` instance. | Below is the the instruction that describes the task:
### Input:
Takes a string and returns a corresponding `Tool` instance.
### Response:
def _tool_from_string(name):
""" Takes a string and returns a corresponding `Tool` instance. """
known_tools = sorted(_known_tools.keys())
if name in known_tools:
tool_fn = _known_tools[name]
if isinstance(tool_fn, string_types):
tool_fn = _known_tools[tool_fn]
return tool_fn()
else:
matches, text = difflib.get_close_matches(name.lower(), known_tools), "similar"
if not matches:
matches, text = known_tools, "possible"
raise ValueError("unexpected tool name '%s', %s tools are %s" % (name, text, nice_join(matches))) |
def _logout(self):
"""Logout from current session(token)
This function doesn't return any boolean value, since it can 'fail' for anonymous logins
"""
self.log.debug("Logging out from session ID: %s" % self._token)
try:
info = self._xmlrpc_server.LogOut(self._token)
self.log.debug("Logout ended in %s with status: %s" %
(info['seconds'], info['status']))
except ProtocolError as e:
self.log.debug("error in HTTP/HTTPS transport layer")
raise
except Fault as e:
self.log.debug("error in xml-rpc server")
raise
except:
self.log.exception("Connection to the server failed/other error")
raise
finally:
# force token reset
self._token = None | Logout from current session(token)
This function doesn't return any boolean value, since it can 'fail' for anonymous logins
### Input:
Logout from current session(token)
This function doesn't return any boolean value, since it can 'fail' for anonymous logins
### Response:
def _logout(self):
"""Logout from current session(token)
This function doesn't return any boolean value, since it can 'fail' for anonymous logins
"""
self.log.debug("Logging out from session ID: %s" % self._token)
try:
info = self._xmlrpc_server.LogOut(self._token)
self.log.debug("Logout ended in %s with status: %s" %
(info['seconds'], info['status']))
except ProtocolError as e:
self.log.debug("error in HTTP/HTTPS transport layer")
raise
except Fault as e:
self.log.debug("error in xml-rpc server")
raise
except:
self.log.exception("Connection to the server failed/other error")
raise
finally:
# force token reset
self._token = None |
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
# This is necessary since pycodestyle sometimes reports columns that
# go past the end of the physical line. This happens in cases like,
# foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed | Remove whitespace around parameter '=' sign. | Below is the instruction that describes the task:
### Input:
Remove whitespace around parameter '=' sign.
### Response:
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
# This is necessary since pycodestyle sometimes reports columns that
# go past the end of the physical line. This happens in cases like,
# foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed |
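The core of the fix is plain string surgery around the reported column; a self-contained illustration with a made-up line and a made-up (already 0-based) column:
# One offending space is reported at a time; a second report cleans the other side.
target = "foo(bar = None)"
c = 7                                   # made-up 0-based column of the space before '='
c = min(c, len(target) - 1)             # guard against columns past the end of the line
if target[c].strip():
    fixed = target                      # reported character is not whitespace: nothing to do
else:
    fixed = target[:c].rstrip() + target[c:].lstrip()
print(fixed)                            # foo(bar= None)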
def calc_distribution_stats(x):
"""Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
Series containing mean, median, std, as well as 5, 25, 75 and
95 percentiles of passed in values.
"""
return pd.Series({'mean': np.mean(x),
'median': np.median(x),
'std': np.std(x),
'5%': np.percentile(x, 5),
'25%': np.percentile(x, 25),
'75%': np.percentile(x, 75),
'95%': np.percentile(x, 95),
'IQR': np.subtract.reduce(
np.percentile(x, [75, 25])),
}) | Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
Series containing mean, median, std, as well as 5, 25, 75 and
95 percentiles of passed in values. | Below is the instruction that describes the task:
### Input:
Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
Series containing mean, median, std, as well as 5, 25, 75 and
95 percentiles of passed in values.
### Response:
def calc_distribution_stats(x):
"""Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
Series containing mean, median, std, as well as 5, 25, 75 and
95 percentiles of passed in values.
"""
return pd.Series({'mean': np.mean(x),
'median': np.median(x),
'std': np.std(x),
'5%': np.percentile(x, 5),
'25%': np.percentile(x, 25),
'75%': np.percentile(x, 75),
'95%': np.percentile(x, 95),
'IQR': np.subtract.reduce(
np.percentile(x, [75, 25])),
}) |
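Assuming the function above is in scope (with numpy as np and pandas as pd imported, as its body implies), a quick usage example on synthetic returns:
import numpy as np
import pandas as pd

np.random.seed(0)
returns = pd.Series(np.random.normal(loc=0.0005, scale=0.01, size=1000))
stats = calc_distribution_stats(returns)
print(stats[['mean', 'median', 'std', 'IQR']])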
def show_errors(self):
"""
Loop over configuration warnings and log them as INFO log
Loop over configuration errors and log them as INFO log
Note that the warnings and errors are logged on the fly during the configuration parsing.
It is not necessary to log as WARNING and ERROR in this function which is used as a sum-up
at the end of configuration parsing when an error has been detected.
:return: None
"""
if self.configuration_warnings:
logger.warning("Configuration warnings:")
for msg in self.configuration_warnings:
logger.warning(msg)
if self.configuration_errors:
logger.warning("Configuration errors:")
for msg in self.configuration_errors:
logger.warning(msg) | Loop over configuration warnings and log them as INFO log
Loop over configuration errors and log them as INFO log
Note that the warnings and errors are logged on the fly during the configuration parsing.
It is not necessary to log as WARNING and ERROR in this function which is used as a sum-up
at the end of configuration parsing when an error has been detected.
:return: None | Below is the instruction that describes the task:
### Input:
Loop over configuration warnings and log them as INFO log
Loop over configuration errors and log them as INFO log
Note that the warnings and errors are logged on the fly during the configuration parsing.
It is not necessary to log as WARNING and ERROR in this function which is used as a sum-up
at the end of configuration parsing when an error has been detected.
:return: None
### Response:
def show_errors(self):
"""
Loop over configuration warnings and log them as INFO log
Loop over configuration errors and log them as INFO log
Note that the warnings and errors are logged on the fly during the configuration parsing.
It is not necessary to log as WARNING and ERROR in this function which is used as a sum-up
at the end of configuration parsing when an error has been detected.
:return: None
"""
if self.configuration_warnings:
logger.warning("Configuration warnings:")
for msg in self.configuration_warnings:
logger.warning(msg)
if self.configuration_errors:
logger.warning("Configuration errors:")
for msg in self.configuration_errors:
logger.warning(msg) |
def packageipa(env, console):
"""
Package the built app as an ipa for distribution in iOS App Store
"""
ipa_path, app_path = _get_ipa(env)
output_dir = path.dirname(ipa_path)
if path.exists(ipa_path):
console.quiet('Removing %s' % ipa_path)
os.remove(ipa_path)
zf = zipfile.ZipFile(ipa_path, mode='w')
payload_dir = 'Payload'
for (dirpath, dirnames, filenames) in os.walk(app_path):
for filename in filenames:
filepath = path.join(dirpath, filename)
prefix = path.commonprefix([filepath, path.dirname(app_path)])
write_path = path.join(payload_dir, filepath[len(prefix) + 1:])
console.quiet('Write %s' % write_path)
zf.write(filepath, write_path)
zf.close()
console.quiet('Packaged %s' % ipa_path) | Package the built app as an ipa for distribution in iOS App Store | Below is the instruction that describes the task:
### Input:
Package the built app as an ipa for distribution in iOS App Store
### Response:
def packageipa(env, console):
"""
Package the built app as an ipa for distribution in iOS App Store
"""
ipa_path, app_path = _get_ipa(env)
output_dir = path.dirname(ipa_path)
if path.exists(ipa_path):
console.quiet('Removing %s' % ipa_path)
os.remove(ipa_path)
zf = zipfile.ZipFile(ipa_path, mode='w')
payload_dir = 'Payload'
for (dirpath, dirnames, filenames) in os.walk(app_path):
for filename in filenames:
filepath = path.join(dirpath, filename)
prefix = path.commonprefix([filepath, path.dirname(app_path)])
write_path = path.join(payload_dir, filepath[len(prefix) + 1:])
console.quiet('Write %s' % write_path)
zf.write(filepath, write_path)
zf.close()
console.quiet('Packaged %s' % ipa_path) |
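The essential step is walking the built .app directory and re-rooting every file under a Payload/ prefix inside the zip; a hedged standalone sketch (paths are hypothetical, and os.path.relpath replaces the commonprefix arithmetic above):
import os
import zipfile
from os import path

def zip_app_as_ipa(app_path, ipa_path):
    """Write everything under app_path into ipa_path below a Payload/ prefix."""
    with zipfile.ZipFile(ipa_path, mode='w') as zf:
        for dirpath, _dirnames, filenames in os.walk(app_path):
            for filename in filenames:
                filepath = path.join(dirpath, filename)
                rel = path.relpath(filepath, path.dirname(app_path))  # e.g. MyApp.app/Info.plist
                zf.write(filepath, path.join('Payload', rel))

# zip_app_as_ipa('/tmp/build/MyApp.app', '/tmp/build/MyApp.ipa')  # hypothetical paths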
def get_gravatar(email, size=80, rating='g', default=None,
protocol=PROTOCOL):
"""
Return url for a Gravatar.
"""
gravatar_protocols = {'http': 'http://www',
'https': 'https://secure'}
url = '%s.gravatar.com/avatar/%s' % (
gravatar_protocols[protocol],
md5(email.strip().lower().encode('utf-8')).hexdigest())
options = {'s': size, 'r': rating}
if default:
options['d'] = default
url = '%s?%s' % (url, urlencode(options))
return url.replace('&', '&') | Return url for a Gravatar. | Below is the instruction that describes the task:
### Input:
Return url for a Gravatar.
### Response:
def get_gravatar(email, size=80, rating='g', default=None,
protocol=PROTOCOL):
"""
Return url for a Gravatar.
"""
gravatar_protocols = {'http': 'http://www',
'https': 'https://secure'}
url = '%s.gravatar.com/avatar/%s' % (
gravatar_protocols[protocol],
md5(email.strip().lower().encode('utf-8')).hexdigest())
options = {'s': size, 'r': rating}
if default:
options['d'] = default
url = '%s?%s' % (url, urlencode(options))
return url.replace('&', '&') |
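For reference, the URL the function builds can be reproduced in a few lines; the address and the 'identicon' default image below are purely illustrative:
from hashlib import md5
try:
    from urllib.parse import urlencode   # Python 3
except ImportError:
    from urllib import urlencode         # Python 2

email = '[email protected]'
digest = md5(email.strip().lower().encode('utf-8')).hexdigest()
url = 'https://secure.gravatar.com/avatar/%s?%s' % (
    digest, urlencode({'s': 80, 'r': 'g', 'd': 'identicon'}))
print(url)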
def export(self, version, export_dir):
"""
Create prov entities and activities.
"""
atts = [
(PROV['type'], self.type),
]
atts += [
(PROV['label'], self.label)
]
if self.extent_rsl is not None:
atts += [
(NIDM_CLUSTER_SIZE_IN_RESELS, self.extent_rsl),
]
if self.extent is not None:
atts += [
(NIDM_CLUSTER_SIZE_IN_VOXELS, self.extent),
]
if version['num'] == "1.0.0":
atts += [
(NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
(NIDM_P_VALUE_UNCORRECTED, self.p_uncorr),
(NIDM_P_VALUE_FWER, self.p_corr)
]
else:
atts += [
(PROV['type'], self.threshold_type)
]
if self.value is not None:
atts += [
(PROV['value'], self.value)
]
if self.equiv_thresh is not None:
for equiv in self.equiv_thresh:
atts += [
(NIDM_EQUIVALENT_THRESHOLD, equiv.id)
]
self.add_attributes([(k, v) for k, v in atts if v is not None]) | Create prov entities and activities. | Below is the instruction that describes the task:
### Input:
Create prov entities and activities.
### Response:
def export(self, version, export_dir):
"""
Create prov entities and activities.
"""
atts = [
(PROV['type'], self.type),
]
atts += [
(PROV['label'], self.label)
]
if self.extent_rsl is not None:
atts += [
(NIDM_CLUSTER_SIZE_IN_RESELS, self.extent_rsl),
]
if self.extent is not None:
atts += [
(NIDM_CLUSTER_SIZE_IN_VOXELS, self.extent),
]
if version['num'] == "1.0.0":
atts += [
(NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
(NIDM_P_VALUE_UNCORRECTED, self.p_uncorr),
(NIDM_P_VALUE_FWER, self.p_corr)
]
else:
atts += [
(PROV['type'], self.threshold_type)
]
if self.value is not None:
atts += [
(PROV['value'], self.value)
]
if self.equiv_thresh is not None:
for equiv in self.equiv_thresh:
atts += [
(NIDM_EQUIVALENT_THRESHOLD, equiv.id)
]
self.add_attributes([(k, v) for k, v in atts if v is not None]) |
def interpolate_motor_positions(self, input_key, nearest_keys):
""" Process linear interpolation to estimate actual speed and position of motors
Method specific to the :meth:~pypot.primitive.move.Move.position() structure
it is a KDTreeDict[timestamp] = {dict[motor]=(position,speed)}
"""
# TODO : to be rewritten with more style (map ?)
if len(nearest_keys) == 1:
return self[nearest_keys[0]]
elif len(nearest_keys) == 0:
raise KeyError('key {} exceed distance_upper_bound {}'.format(
input_key, self.distance_upper_bound))
elif len(nearest_keys) != 2:
raise NotImplementedError("interpolation works only for k_neighbors = 2")
elif nearest_keys[0] == nearest_keys[1]:
# Bug from nearest key ?
return self[nearest_keys[0]]
# Problem if ValueError: A value in x_new is above the interpolation range.
elif input_key < min(nearest_keys):
return self[min(nearest_keys)]
elif input_key > max(nearest_keys):
return self[max(nearest_keys)]
interpolated_positions = {}
for (k, v), (k2, v2) in zip(self[nearest_keys[0]].items(), self[nearest_keys[1]].items()):
if k == k2:
x = np.array(nearest_keys)
y_pos = np.array([v[0], v2[0]])
y_speed = np.array([v[1], v2[1]])
f_pos = interp1d(x, y_pos, bounds_error=False)
f_speed = interp1d(x, y_speed, bounds_error=False)
# print k, input_key, (float(f_pos(input_key[0])), float(f_speed(input_key[0])))
interpolated_positions[k] = (f_pos(input_key), f_speed(input_key))
else:
raise IndexError("keys are not identical. Motor added during the record?")
return interpolated_positions | Process linear interpolation to estimate actual speed and position of motors
Method specific to the :meth:~pypot.primitive.move.Move.position() structure
it is a KDTreeDict[timestamp] = {dict[motor]=(position,speed)} | Below is the instruction that describes the task:
### Input:
Process linear interpolation to estimate actual speed and position of motors
Method specific to the :meth:~pypot.primitive.move.Move.position() structure
it is a KDTreeDict[timestamp] = {dict[motor]=(position,speed)}
### Response:
def interpolate_motor_positions(self, input_key, nearest_keys):
""" Process linear interpolation to estimate actual speed and position of motors
Method specific to the :meth:~pypot.primitive.move.Move.position() structure
it is a KDTreeDict[timestamp] = {dict[motor]=(position,speed)}
"""
# TODO : to be rewritten with more style (map ?)
if len(nearest_keys) == 1:
return self[nearest_keys[0]]
elif len(nearest_keys) == 0:
raise KeyError('key {} exceed distance_upper_bound {}'.format(
input_key, self.distance_upper_bound))
elif len(nearest_keys) != 2:
raise NotImplementedError("interpolation works only for k_neighbors = 2")
elif nearest_keys[0] == nearest_keys[1]:
# Bug from nearest key ?
return self[nearest_keys[0]]
# Problem if ValueError: A value in x_new is above the interpolation range.
elif input_key < min(nearest_keys):
return self[min(nearest_keys)]
elif input_key > max(nearest_keys):
return self[max(nearest_keys)]
interpolated_positions = {}
for (k, v), (k2, v2) in zip(self[nearest_keys[0]].items(), self[nearest_keys[1]].items()):
if k == k2:
x = np.array(nearest_keys)
y_pos = np.array([v[0], v2[0]])
y_speed = np.array([v[1], v2[1]])
f_pos = interp1d(x, y_pos, bounds_error=False)
f_speed = interp1d(x, y_speed, bounds_error=False)
# print k, input_key, (float(f_pos(input_key[0])), float(f_speed(input_key[0])))
interpolated_positions[k] = (f_pos(input_key), f_speed(input_key))
else:
raise IndexError("keys are not identical. Motor added during the record?")
return interpolated_positions |
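Per motor, the interpolation reduces to two 1-D linear fits between the two nearest recorded timestamps; a self-contained sketch with made-up frames:
import numpy as np
from scipy.interpolate import interp1d

t0, t1 = 0.00, 0.02                       # two nearest recorded timestamps (seconds)
pos = np.array([10.0, 12.0])              # motor position at t0 and t1
speed = np.array([5.0, 5.5])              # motor speed at t0 and t1

f_pos = interp1d([t0, t1], pos, bounds_error=False)
f_speed = interp1d([t0, t1], speed, bounds_error=False)
t_query = 0.015
print(float(f_pos(t_query)), float(f_speed(t_query)))   # 11.5 5.375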
def analyse(self, name):
"""
reads the specified file.
:param name: the name.
:return: the analysis as frequency/Pxx.
"""
if name in self._cache:
target = self._cache[name]
if target['type'] == 'wav':
signal = self._uploadController.loadSignal(target['filename'],
start=target['start'] if target['start'] != 'start' else None,
end=target['end'] if target['end'] != 'end' else None)
if signal is not None:
# TODO allow user defined window
return getattr(signal, target['analysis'])(ref=1.0)
else:
return None, 404
pass
elif target['type'] == 'hinge':
hingePoints = np.array(target['hinge']).astype(np.float64)
x = hingePoints[:, 1]
y = hingePoints[:, 0]
# extend as straight line from 0 to 500
if x[0] != 0:
x = np.insert(x, 0, 0.0000001)
y = np.insert(y, 0, y[0])
if x[-1] != 500:
x = np.insert(x, len(x), 500.0)
y = np.insert(y, len(y), y[-1])
# convert the y axis dB values into a linear value
y = 10 ** (y / 10)
# perform a logspace interpolation
f = self.log_interp1d(x, y)
# remap to 0-500
xnew = np.linspace(x[0], x[-1], num=500, endpoint=False)
# and convert back to dB
return xnew, 10 * np.log10(f(xnew))
else:
logger.error('Unknown target type with name ' + name)
return None | reads the specified file.
:param name: the name.
:return: the analysis as frequency/Pxx. | Below is the instruction that describes the task:
### Input:
reads the specified file.
:param name: the name.
:return: the analysis as frequency/Pxx.
### Response:
def analyse(self, name):
"""
reads the specified file.
:param name: the name.
:return: the analysis as frequency/Pxx.
"""
if name in self._cache:
target = self._cache[name]
if target['type'] == 'wav':
signal = self._uploadController.loadSignal(target['filename'],
start=target['start'] if target['start'] != 'start' else None,
end=target['end'] if target['end'] != 'end' else None)
if signal is not None:
# TODO allow user defined window
return getattr(signal, target['analysis'])(ref=1.0)
else:
return None, 404
pass
elif target['type'] == 'hinge':
hingePoints = np.array(target['hinge']).astype(np.float64)
x = hingePoints[:, 1]
y = hingePoints[:, 0]
# extend as straight line from 0 to 500
if x[0] != 0:
x = np.insert(x, 0, 0.0000001)
y = np.insert(y, 0, y[0])
if x[-1] != 500:
x = np.insert(x, len(x), 500.0)
y = np.insert(y, len(y), y[-1])
# convert the y axis dB values into a linear value
y = 10 ** (y / 10)
# perform a logspace interpolation
f = self.log_interp1d(x, y)
# remap to 0-500
xnew = np.linspace(x[0], x[-1], num=500, endpoint=False)
# and convert back to dB
return xnew, 10 * np.log10(f(xnew))
else:
logger.error('Unknown target type with name ' + name)
return None |
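The hinge branch leans on a `log_interp1d` helper that is not shown above; a common recipe for it, together with the dB round-trip the method performs, is sketched below (the hinge points are made up):
import numpy as np
from scipy.interpolate import interp1d

def log_interp1d(x, y, kind='linear'):
    """Assumed equivalent of the class helper: interpolate in log-log space."""
    lin = interp1d(np.log10(x), np.log10(y), kind=kind, fill_value='extrapolate')
    return lambda xnew: np.power(10.0, lin(np.log10(xnew)))

hinge = np.array([[0.0, 10.0], [-6.0, 100.0], [-6.0, 400.0]])  # [dB, Hz] pairs
x, y_db = hinge[:, 1], hinge[:, 0]
y_lin = 10 ** (y_db / 10)                   # dB -> linear, as in the method above
f = log_interp1d(x, y_lin)
xnew = np.linspace(x[0], x[-1], num=50, endpoint=False)
curve_db = 10 * np.log10(f(xnew))           # back to dB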
def recv_task_request_from_workers(self):
""" Receives 1 task request from MPI comm
Returns:
--------
worker_rank: worker_rank id
"""
info = MPI.Status()
comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
worker_rank = info.Get_source()
logger.info("Received task request from worker:{}".format(worker_rank))
return worker_rank | Receives 1 task request from MPI comm
Returns:
--------
worker_rank: worker_rank id | Below is the instruction that describes the task:
### Input:
Receives 1 task request from MPI comm
Returns:
--------
worker_rank: worker_rank id
### Response:
def recv_task_request_from_workers(self):
""" Receives 1 task request from MPI comm
Returns:
--------
worker_rank: worker_rank id
"""
info = MPI.Status()
comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
worker_rank = info.Get_source()
logger.info("Received task request from worker:{}".format(worker_rank))
return worker_rank |
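A minimal mpi4py sketch of both ends of this handshake (run with at least two ranks, e.g. mpiexec -n 2); the tag value and the choice of rank 0 as master are assumptions, the real constants live elsewhere in the module:
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
TASK_REQUEST_TAG = 22          # placeholder; the real tag is defined in the parent module

if rank == 0:
    # master side, mirroring recv_task_request_from_workers
    info = MPI.Status()
    comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
    print("task request from worker", info.Get_source())
else:
    # worker side: announce availability with an empty request
    comm.send(None, dest=0, tag=TASK_REQUEST_TAG)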
def get_tokens(self, locations=None, extent=None):
"""Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are defined,
behavior is undefined.
"""
if locations is not None:
extent = SourceRange(start=locations[0], end=locations[1])
return TokenGroup.get_tokens(self, extent) | Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are defined,
behavior is undefined. | Below is the instruction that describes the task:
### Input:
Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are defined,
behavior is undefined.
### Response:
def get_tokens(self, locations=None, extent=None):
"""Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are defined,
behavior is undefined.
"""
if locations is not None:
extent = SourceRange(start=locations[0], end=locations[1])
return TokenGroup.get_tokens(self, extent) |
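A short usage sketch with the clang Python bindings (assumes libclang is installed and discoverable; the C source snippet is made up):
import clang.cindex as ci

index = ci.Index.create()
tu = index.parse('example.c',
                 unsaved_files=[('example.c', 'int add(int a, int b) { return a + b; }')])
for tok in tu.get_tokens(extent=tu.cursor.extent):
    print(tok.kind, tok.spelling)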
def day_publications_card(date):
"""
Displays Publications that were being read on `date`.
`date` is a date object.
"""
d = date.strftime(app_settings.DATE_FORMAT)
card_title = 'Reading on {}'.format(d)
return {
'card_title': card_title,
'publication_list': day_publications(date=date),
} | Displays Publications that were being read on `date`.
`date` is a date object. | Below is the instruction that describes the task:
### Input:
Displays Publications that were being read on `date`.
`date` is a date object.
### Response:
def day_publications_card(date):
"""
Displays Publications that were being read on `date`.
`date` is a date object.
"""
d = date.strftime(app_settings.DATE_FORMAT)
card_title = 'Reading on {}'.format(d)
return {
'card_title': card_title,
'publication_list': day_publications(date=date),
} |
def synchronize(self):
"""Synchronizes Router DB from Neturon DB with EOS.
Walks through the Neturon Db and ensures that all the routers
created in Netuton DB match with EOS. After creating appropriate
routers, it ensures to add interfaces as well.
Uses idempotent properties of EOS configuration, which means
same commands can be repeated.
"""
LOG.info(_LI('Syncing Neutron Router DB <-> EOS'))
routers, router_interfaces = self.get_routers_and_interfaces()
expected_vrfs = set()
if self._use_vrf:
expected_vrfs.update(self.driver._arista_router_name(
r['id'], r['name']) for r in routers)
expected_vlans = set(r['seg_id'] for r in router_interfaces)
if self._enable_cleanup:
self.do_cleanup(expected_vrfs, expected_vlans)
self.create_routers(routers)
self.create_router_interfaces(router_interfaces) | Synchronizes Router DB from Neutron DB with EOS.
Walks through the Neutron DB and ensures that all the routers
created in Neutron DB match with EOS. After creating appropriate
routers, it ensures that interfaces are added as well.
Uses idempotent properties of EOS configuration, which means
same commands can be repeated. | Below is the instruction that describes the task:
### Input:
Synchronizes Router DB from Neutron DB with EOS.
Walks through the Neutron DB and ensures that all the routers
created in Neutron DB match with EOS. After creating appropriate
routers, it ensures that interfaces are added as well.
Uses idempotent properties of EOS configuration, which means
same commands can be repeated.
### Response:
def synchronize(self):
"""Synchronizes Router DB from Neturon DB with EOS.
Walks through the Neturon Db and ensures that all the routers
created in Netuton DB match with EOS. After creating appropriate
routers, it ensures to add interfaces as well.
Uses idempotent properties of EOS configuration, which means
same commands can be repeated.
"""
LOG.info(_LI('Syncing Neutron Router DB <-> EOS'))
routers, router_interfaces = self.get_routers_and_interfaces()
expected_vrfs = set()
if self._use_vrf:
expected_vrfs.update(self.driver._arista_router_name(
r['id'], r['name']) for r in routers)
expected_vlans = set(r['seg_id'] for r in router_interfaces)
if self._enable_cleanup:
self.do_cleanup(expected_vrfs, expected_vlans)
self.create_routers(routers)
self.create_router_interfaces(router_interfaces) |
def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle case where stock data starts on first
# day, in this case use the open to close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) | Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute | Below is the instruction that describes the task:
### Input:
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
### Response:
def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle case where stock data starts on first
# day, in this case use the open to close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) |
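The start_date == trading_days[0] branch boils down to a pct_change series whose undefined first element is replaced by the first day's open-to-close return; a small pandas illustration with made-up prices:
import pandas as pd

days = pd.date_range('2016-01-04', periods=4, freq='B')
closes = pd.Series([100.0, 102.0, 101.0, 103.0], index=days)

returns = closes.pct_change()            # first value is NaN: there is no prior close
first_open, first_close = 99.0, 100.0    # assumed open/close of the first session
returns.iloc[0] = (first_close - first_open) / first_open
print(returns)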
def persistent_attributes(self):
# type: () -> Dict[str, object]
"""Attributes stored at the Persistence level of the skill lifecycle.
:return: persistent_attributes retrieved from persistence adapter
:rtype: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to get persistent attributes without persistence adapter
"""
if not self._persistence_adapter:
raise AttributesManagerException(
"Cannot get PersistentAttributes without Persistence adapter")
if not self._persistent_attributes_set:
self._persistence_attributes = (
self._persistence_adapter.get_attributes(
request_envelope=self._request_envelope))
self._persistent_attributes_set = True
return self._persistence_attributes | Attributes stored at the Persistence level of the skill lifecycle.
:return: persistent_attributes retrieved from persistence adapter
:rtype: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to get persistent attributes without persistence adapter | Below is the instruction that describes the task:
### Input:
Attributes stored at the Persistence level of the skill lifecycle.
:return: persistent_attributes retrieved from persistence adapter
:rtype: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to get persistent attributes without persistence adapter
### Response:
def persistent_attributes(self):
# type: () -> Dict[str, object]
"""Attributes stored at the Persistence level of the skill lifecycle.
:return: persistent_attributes retrieved from persistence adapter
:rtype: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to get persistent attributes without persistence adapter
"""
if not self._persistence_adapter:
raise AttributesManagerException(
"Cannot get PersistentAttributes without Persistence adapter")
if not self._persistent_attributes_set:
self._persistence_attributes = (
self._persistence_adapter.get_attributes(
request_envelope=self._request_envelope))
self._persistent_attributes_set = True
return self._persistence_attributes |
def singleton(func):
"""
This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!!
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if wrapper.__call_state__ == CALLED:
return
ret = func(*args, **kwargs)
wrapper.__call_state__ = CALLED
return ret
def reset():
wrapper.__call_state__ = NOT_CALLED
wrapper.reset = reset
reset()
# save original func to be able to patch and restore multiple times from
# unit tests
wrapper.__original_func = func
return wrapper | This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!! | Below is the instruction that describes the task:
### Input:
This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!!
### Response:
def singleton(func):
"""
This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!!
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if wrapper.__call_state__ == CALLED:
return
ret = func(*args, **kwargs)
wrapper.__call_state__ = CALLED
return ret
def reset():
wrapper.__call_state__ = NOT_CALLED
wrapper.reset = reset
reset()
# save original func to be able to patch and restore multiple times from
# unit tests
wrapper.__original_func = func
return wrapper |
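Usage sketch, assuming the decorator above sits in the same module together with import functools and the two sentinel constants it references (their exact values are not shown, so any two distinct objects work here):
NOT_CALLED, CALLED = 'not_called', 'called'   # assumed module-level sentinels

@singleton
def bootstrap():
    print('bootstrapping')
    return 42

print(bootstrap())    # prints 'bootstrapping', then 42
print(bootstrap())    # swallowed: prints None, the body does not run again
bootstrap.reset()
print(bootstrap())    # runs once more after reset()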
def find_source_lines(self):
"""Mark all executable source lines in fn as executed 0 times."""
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
self.firstcodelineno = sys.maxint
for lineno in lines:
self.firstcodelineno = min(self.firstcodelineno, lineno)
self.sourcelines.setdefault(lineno, 0)
if self.firstcodelineno == sys.maxint:
self.firstcodelineno = self.firstlineno | Mark all executable source lines in fn as executed 0 times. | Below is the instruction that describes the task:
### Input:
Mark all executable source lines in fn as executed 0 times.
### Response:
def find_source_lines(self):
"""Mark all executable source lines in fn as executed 0 times."""
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
self.firstcodelineno = sys.maxint
for lineno in lines:
self.firstcodelineno = min(self.firstcodelineno, lineno)
self.sourcelines.setdefault(lineno, 0)
if self.firstcodelineno == sys.maxint:
self.firstcodelineno = self.firstlineno |
def distinct_letters(string_matrix: List[List[str]]) -> Set[str]:
"""
Diagnostic function
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
>>> sorted(dl)
['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w']
"""
return set([letter
for sentence in string_matrix
for word in sentence
for letter in word]) | Diagnostic function
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
>>> sorted(dl)
['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w'] | Below is the instruction that describes the task:
### Input:
Diagnostic function
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
>>> sorted(dl)
['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w']
### Response:
def distinct_letters(string_matrix: List[List[str]]) -> Set[str]:
"""
Diagnostic function
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
>>> sorted(dl)
['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w']
"""
return set([letter
for sentence in string_matrix
for word in sentence
for letter in word]) |
def step(self, thumb=False):
"""Executes a single step.
Steps even if there is a breakpoint.
Args:
self (JLink): the ``JLink`` instance
thumb (bool): boolean indicating if to step in thumb mode
Returns:
``None``
Raises:
JLinkException: on error
"""
method = self._dll.JLINKARM_Step
if thumb:
method = self._dll.JLINKARM_StepComposite
res = method()
if res != 0:
raise errors.JLinkException("Failed to step over instruction.")
return None | Executes a single step.
Steps even if there is a breakpoint.
Args:
self (JLink): the ``JLink`` instance
thumb (bool): boolean indicating if to step in thumb mode
Returns:
``None``
Raises:
JLinkException: on error | Below is the instruction that describes the task:
### Input:
Executes a single step.
Steps even if there is a breakpoint.
Args:
self (JLink): the ``JLink`` instance
thumb (bool): boolean indicating if to step in thumb mode
Returns:
``None``
Raises:
JLinkException: on error
### Response:
def step(self, thumb=False):
"""Executes a single step.
Steps even if there is a breakpoint.
Args:
self (JLink): the ``JLink`` instance
thumb (bool): boolean indicating if to step in thumb mode
Returns:
``None``
Raises:
JLinkException: on error
"""
method = self._dll.JLINKARM_Step
if thumb:
method = self._dll.JLINKARM_StepComposite
res = method()
if res != 0:
raise errors.JLinkException("Failed to step over instruction.")
return None |