code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def portalAdmin(self):
"""gets a reference to a portal administration class"""
from ..manageportal import PortalAdministration
return PortalAdministration(admin_url="https://%s/portaladmin" % self.portalHostname,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=False) | gets a reference to a portal administration class | Below is the instruction that describes the task:
### Input:
gets a reference to a portal administration class
### Response:
def portalAdmin(self):
"""gets a reference to a portal administration class"""
from ..manageportal import PortalAdministration
return PortalAdministration(admin_url="https://%s/portaladmin" % self.portalHostname,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=False) |
def MI_deleteInstance(self,
env,
instanceName):
# pylint: disable=invalid-name
"""Delete a CIM instance
Implements the WBEM operation DeleteInstance in terms
of the delete_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider MI_deleteInstance called...')
self.delete_instance(env=env, instance_name=instanceName)
logger.log_debug('CIMProvider MI_deleteInstance returning') | Delete a CIM instance
Implements the WBEM operation DeleteInstance in terms
of the delete_instance method. A derived class will not normally
override this method. | Below is the instruction that describes the task:
### Input:
Delete a CIM instance
Implements the WBEM operation DeleteInstance in terms
of the delete_instance method. A derived class will not normally
override this method.
### Response:
def MI_deleteInstance(self,
env,
instanceName):
# pylint: disable=invalid-name
"""Delete a CIM instance
Implements the WBEM operation DeleteInstance in terms
of the delete_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider MI_deleteInstance called...')
self.delete_instance(env=env, instance_name=instanceName)
logger.log_debug('CIMProvider MI_deleteInstance returning') |
def fileUpd(self, buffer=None, filename=None, ufilename=None, desc=None):
"""Update annotation attached file."""
CheckParent(self)
return _fitz.Annot_fileUpd(self, buffer, filename, ufilename, desc) | Update annotation attached file. | Below is the instruction that describes the task:
### Input:
Update annotation attached file.
### Response:
def fileUpd(self, buffer=None, filename=None, ufilename=None, desc=None):
"""Update annotation attached file."""
CheckParent(self)
return _fitz.Annot_fileUpd(self, buffer, filename, ufilename, desc) |
def get_comments(self):
"""Gets the comment list resulting from a search.
return: (osid.commenting.CommentList) - the comment list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CommentList(self._results, runtime=self._runtime) | Gets the comment list resulting from a search.
return: (osid.commenting.CommentList) - the comment list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the comment list resulting from a search.
return: (osid.commenting.CommentList) - the comment list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_comments(self):
"""Gets the comment list resulting from a search.
return: (osid.commenting.CommentList) - the comment list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CommentList(self._results, runtime=self._runtime) |
def get_mock_personalization_dict():
"""Get a dict of personalization mock."""
mock_pers = dict()
mock_pers['to_list'] = [To("[email protected]",
"Example User"),
To("[email protected]",
"Example User")]
mock_pers['cc_list'] = [To("[email protected]",
"Example User"),
To("[email protected]",
"Example User")]
mock_pers['bcc_list'] = [To("[email protected]"),
To("[email protected]")]
mock_pers['subject'] = ("Hello World from the Personalized "
"SendGrid Python Library")
mock_pers['headers'] = [Header("X-Test", "test"),
Header("X-Mock", "true")]
mock_pers['substitutions'] = [Substitution("%name%", "Example User"),
Substitution("%city%", "Denver")]
mock_pers['custom_args'] = [CustomArg("user_id", "343"),
CustomArg("type", "marketing")]
mock_pers['send_at'] = 1443636843
return mock_pers | Get a dict of personalization mock. | Below is the instruction that describes the task:
### Input:
Get a dict of personalization mock.
### Response:
def get_mock_personalization_dict():
"""Get a dict of personalization mock."""
mock_pers = dict()
mock_pers['to_list'] = [To("[email protected]",
"Example User"),
To("[email protected]",
"Example User")]
mock_pers['cc_list'] = [To("[email protected]",
"Example User"),
To("[email protected]",
"Example User")]
mock_pers['bcc_list'] = [To("[email protected]"),
To("[email protected]")]
mock_pers['subject'] = ("Hello World from the Personalized "
"SendGrid Python Library")
mock_pers['headers'] = [Header("X-Test", "test"),
Header("X-Mock", "true")]
mock_pers['substitutions'] = [Substitution("%name%", "Example User"),
Substitution("%city%", "Denver")]
mock_pers['custom_args'] = [CustomArg("user_id", "343"),
CustomArg("type", "marketing")]
mock_pers['send_at'] = 1443636843
return mock_pers |
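A brief usage sketch for the mock dict above, feeding its entries into sendgrid's Personalization helper. The import path and the add_to/subject API reflect the sendgrid v6 helpers and are assumptions here, not part of the original snippet.
from sendgrid.helpers.mail import Personalization

# Build a Personalization object from the mock dict above (assumed API).
mock = get_mock_personalization_dict()
personalization = Personalization()
for to_email in mock['to_list']:
    personalization.add_to(to_email)   # queue each To recipient
personalization.subject = mock['subject']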
def deserialize(self, mimetypes): # pylint: disable=arguments-differ
""" Invoke the deserializer
Upon successful deserialization a dict will be returned
containing the following key/vals:
{
'content': <uploaded object>,
'content-type': <content-type of content>,
'file-ext': <file extension based on content-type>,
'file-name': <file name of content>,
}
:param mimetypes:
allowed mimetypes of the object in the request
payload
:return:
normalized dict
"""
super(Deserializer, self).deserialize()
parts = self.parse(mimetypes)
data = self.normalize(parts)
return data | Invoke the deserializer
Upon successful deserialization a dict will be returned
containing the following key/vals:
{
'content': <uploaded object>,
'content-type': <content-type of content>,
'file-ext': <file extension based on content-type>,
'file-name': <file name of content>,
}
:param mimetypes:
allowed mimetypes of the object in the request
payload
:return:
normalized dict | Below is the instruction that describes the task:
### Input:
Invoke the deserializer
Upon successful deserialization a dict will be returned
containing the following key/vals:
{
'content': <uploaded object>,
'content-type': <content-type of content>,
'file-ext': <file extension based on content-type>,
'file-name': <file name of content>,
}
:param mimetypes:
allowed mimetypes of the object in the request
payload
:return:
normalized dict
### Response:
def deserialize(self, mimetypes): # pylint: disable=arguments-differ
""" Invoke the deserializer
Upon successful deserialization a dict will be returned
containing the following key/vals:
{
'content': <uploaded object>,
'content-type': <content-type of content>,
'file-ext': <file extension based on content-type>,
'file-name': <file name of content>,
}
:param mimetypes:
allowed mimetypes of the object in the request
payload
:return:
normalized dict
"""
super(Deserializer, self).deserialize()
parts = self.parse(mimetypes)
data = self.normalize(parts)
return data |
def get_direct_fields_from_model(model_class):
""" Direct, not m2m, not FK """
direct_fields = []
all_fields_names = _get_all_field_names(model_class)
for field_name in all_fields_names:
field, model, direct, m2m = _get_field_by_name(model_class, field_name)
if direct and not m2m and not _get_remote_field(field):
direct_fields += [field]
return direct_fields | Direct, not m2m, not FK | Below is the instruction that describes the task:
### Input:
Direct, not m2m, not FK
### Response:
def get_direct_fields_from_model(model_class):
""" Direct, not m2m, not FK """
direct_fields = []
all_fields_names = _get_all_field_names(model_class)
for field_name in all_fields_names:
field, model, direct, m2m = _get_field_by_name(model_class, field_name)
if direct and not m2m and not _get_remote_field(field):
direct_fields += [field]
return direct_fields |
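For comparison, a rough modern-Django equivalent of the helper above, assuming Django 1.8+ where Model._meta.get_fields() is public API; "direct, not m2m, not FK" corresponds to concrete, non-relational fields. This is a sketch, not the library's implementation.
def get_direct_fields(model_class):
    # Concrete fields that are neither relations nor reverse accessors.
    return [
        f for f in model_class._meta.get_fields()
        if getattr(f, 'concrete', False) and not f.is_relation
    ]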
def xtob(data, sep=''):
"""Interpret the hex encoding of a blob (string)."""
# remove the non-hex characters
data = re.sub("[^0-9a-fA-F]", '', data)
# interpret the hex
return binascii.unhexlify(data) | Interpret the hex encoding of a blob (string). | Below is the instruction that describes the task:
### Input:
Interpret the hex encoding of a blob (string).
### Response:
def xtob(data, sep=''):
"""Interpret the hex encoding of a blob (string)."""
# remove the non-hex characters
data = re.sub("[^0-9a-fA-F]", '', data)
# interpret the hex
return binascii.unhexlify(data) |
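A quick check of xtob's behavior, assuming the function and its re/binascii imports above are in scope: separator characters are discarded before decoding, and the cleaned string must contain an even number of hex digits.
print(xtob("01:02:0a"))       # b'\x01\x02\n'
print(xtob("de ad be ef"))    # b'\xde\xad\xbe\xef'
print(xtob("DEADBEEF"))       # b'\xde\xad\xbe\xef' (case-insensitive)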
def join(self, inner_iterable, outer_key_selector=identity,
inner_key_selector=identity,
result_selector=lambda outer, inner: (outer, inner)):
'''Perform an inner join with a second sequence using selected keys.
The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.
Note: This method uses deferred execution.
Args:
inner_iterable: The sequence to join with the outer sequence.
outer_key_selector: An optional unary function to extract keys from
elements of the outer (source) sequence. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
inner_key_selector: An optional unary function to extract keys
from elements of the inner_iterable. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
result_selector: An optional binary function to create a result
element from two matching elements of the outer and inner. If
omitted the result elements will be a 2-tuple pair of the
matching outer and inner elements.
Returns:
A Queryable whose elements are the result of performing an inner-
join on two sequences.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the inner_iterable is not in fact iterable.
TypeError: If the outer_key_selector is not callable.
TypeError: If the inner_key_selector is not callable.
TypeError: If the result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call join() on a closed Queryable.")
if not is_iterable(inner_iterable):
raise TypeError("Cannot compute join() with inner_iterable of "
"non-iterable {0}".format(str(type(inner_iterable))[7: -1]))
if not is_callable(outer_key_selector):
raise TypeError("join() parameter outer_key_selector={0} is not "
"callable".format(repr(outer_key_selector)))
if not is_callable(inner_key_selector):
raise TypeError("join() parameter inner_key_selector={0} is not "
"callable".format(repr(inner_key_selector)))
if not is_callable(result_selector):
raise TypeError("join() parameter result_selector={0} is not "
"callable".format(repr(result_selector)))
return self._create(self._generate_join_result(inner_iterable, outer_key_selector,
inner_key_selector, result_selector)) | Perform an inner join with a second sequence using selected keys.
The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.
Note: This method uses deferred execution.
Args:
inner_iterable: The sequence to join with the outer sequence.
outer_key_selector: An optional unary function to extract keys from
elements of the outer (source) sequence. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
inner_key_selector: An optional unary function to extract keys
from elements of the inner_iterable. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
result_selector: An optional binary function to create a result
element from two matching elements of the outer and inner. If
omitted the result elements will be a 2-tuple pair of the
matching outer and inner elements.
Returns:
A Queryable whose elements are the result of performing an inner-
join on two sequences.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the inner_iterable is not in fact iterable.
TypeError: If the outer_key_selector is not callable.
TypeError: If the inner_key_selector is not callable.
TypeError: If the result_selector is not callable. | Below is the instruction that describes the task:
### Input:
Perform an inner join with a second sequence using selected keys.
The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.
Note: This method uses deferred execution.
Args:
inner_iterable: The sequence to join with the outer sequence.
outer_key_selector: An optional unary function to extract keys from
elements of the outer (source) sequence. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
inner_key_selector: An optional unary function to extract keys
from elements of the inner_iterable. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
result_selector: An optional binary function to create a result
element from two matching elements of the outer and inner. If
omitted the result elements will be a 2-tuple pair of the
matching outer and inner elements.
Returns:
A Queryable whose elements are the result of performing an inner-
join on two sequences.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the inner_iterable is not in fact iterable.
TypeError: If the outer_key_selector is not callable.
TypeError: If the inner_key_selector is not callable.
TypeError: If the result_selector is not callable.
### Response:
def join(self, inner_iterable, outer_key_selector=identity,
inner_key_selector=identity,
result_selector=lambda outer, inner: (outer, inner)):
'''Perform an inner join with a second sequence using selected keys.
The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.
Note: This method uses deferred execution.
Args:
inner_iterable: The sequence to join with the outer sequence.
outer_key_selector: An optional unary function to extract keys from
elements of the outer (source) sequence. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
inner_key_selector: An optional unary function to extract keys
from elements of the inner_iterable. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
result_selector: An optional binary function to create a result
element from two matching elements of the outer and inner. If
omitted the result elements will be a 2-tuple pair of the
matching outer and inner elements.
Returns:
A Queryable whose elements are the result of performing an inner-
join on two sequences.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the inner_iterable is not in fact iterable.
TypeError: If the outer_key_selector is not callable.
TypeError: If the inner_key_selector is not callable.
TypeError: If the result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call join() on a closed Queryable.")
if not is_iterable(inner_iterable):
raise TypeError("Cannot compute join() with inner_iterable of "
"non-iterable {0}".format(str(type(inner_iterable))[7: -1]))
if not is_callable(outer_key_selector):
raise TypeError("join() parameter outer_key_selector={0} is not "
"callable".format(repr(outer_key_selector)))
if not is_callable(inner_key_selector):
raise TypeError("join() parameter inner_key_selector={0} is not "
"callable".format(repr(inner_key_selector)))
if not is_callable(result_selector):
raise TypeError("join() parameter result_selector={0} is not "
"callable".format(repr(result_selector)))
return self._create(self._generate_join_result(inner_iterable, outer_key_selector,
inner_key_selector, result_selector)) |
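A minimal sketch of the generator that a helper like _generate_join_result could wrap (names and structure here are illustrative, not the library's actual code): index the inner sequence by key once iteration begins, then stream outer elements in order, so the documented ordering guarantees hold.
def inner_join(outer, inner, outer_key, inner_key, result):
    lookup = {}
    for item in inner:                      # group inner elements by key
        lookup.setdefault(inner_key(item), []).append(item)
    for o in outer:                         # preserve outer ordering
        for i in lookup.get(outer_key(o), ()):
            yield result(o, i)

pairs = inner_join([1, 2, 3], ["one", "two", "ten"],
                   outer_key=lambda n: n,
                   inner_key=len,
                   result=lambda o, i: (o, i))
print(list(pairs))  # [(3, 'one'), (3, 'two'), (3, 'ten')]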
def set(self, val):
"""
set value of this param
"""
assert not self.__isReadOnly, \
("This parameter(%s) was locked"
" and now it can not be changed" % self.name)
assert self.replacedWith is None, \
("This param was replaced with new one and this "
"should not exists")
val = toHVal(val)
self.defVal = val
self._val = val.staticEval()
self._dtype = self._val._dtype | set value of this param | Below is the instruction that describes the task:
### Input:
set value of this param
### Response:
def set(self, val):
"""
set value of this param
"""
assert not self.__isReadOnly, \
("This parameter(%s) was locked"
" and now it can not be changed" % self.name)
assert self.replacedWith is None, \
("This param was replaced with new one and this "
"should not exists")
val = toHVal(val)
self.defVal = val
self._val = val.staticEval()
self._dtype = self._val._dtype |
def format_sdist_header_metadata(data, filename):
"""
Format the metadata of pypi packages stored in email header format.
Currently only used as backup on the wheel (compressed) file format.
"""
description = get_header_description(data)
config_items = python_version_check(data)
attrs = dict(config_items)
name = pop_key(attrs, 'Name', None)
basename = path.basename(filename)
if name is None:
name = basename.split('-')[0]
package_data = {
'name': name,
'summary': pop_key(attrs, 'Summary', None),
'license': pop_key(attrs, 'License', None),
}
release_data = {
'version': pop_key(attrs, 'Version'),
'description': pop_key(attrs, 'Description', description),
'home_page': pop_key(attrs, 'Home-page', None),
}
file_data = {
'basename': basename,
'attrs': {
'packagetype': 'sdist',
'python_version': 'source',
}
}
# Parse multiple keys
deps = []
exts = {}
environments = {}
for key, val in config_items:
if key in ['Requires-Dist', 'Requires']:
name, extras, const, marker, url = parse_specification(val)
name = norm_package_name(name)
specs = const.split(',')
new_specs = []
for spec in specs:
pos = [i for i, c in enumerate(spec) if c in '0123456789']
if pos:
pos = pos[0]
comp, spec_ = spec[:pos].strip(), spec[pos:].strip()
new_specs.append((comp, spec_))
# TODO: All this is to preserve the format used originally
# but is this really needed?
if marker:
if marker.startswith('extra'):
marker = marker.replace('extra', '')
marker = marker.replace('==', '').strip()
ext = marker.rsplit(' ')[-1]
if '"' in ext or "'" in ext:
ext = ext[1:-1]
if ext not in exts:
exts[ext] = [{'name': name, 'specs': new_specs}]
else:
exts[ext].append({'name': name, 'specs': new_specs})
else:
if marker not in environments:
environments[marker] = [{'name': name, 'specs': new_specs}]
else:
environments[marker].append({'name': name, 'specs': new_specs})
else:
deps.append({
'name': name,
'specs': new_specs,
})
deps.sort(key=lambda o: o['name'])
new_exts = []
for key, values in exts.items():
new_exts.append({'name': key, 'depends': values})
new_environments = []
for key, values in environments.items():
new_environments.append({'name': key, 'depends': values})
file_data.update(dependencies={
'has_dep_errors': False,
'depends': deps,
'extras': new_exts,
'environments': new_environments,
})
return package_data, release_data, file_data | Format the metadata of pypi packages stored in email header format.
Currently only used as backup on the wheel (compressed) file format. | Below is the instruction that describes the task:
### Input:
Format the metadata of pypi packages stored in email header format.
Currently only used as backup on the wheel (compressed) file format.
### Response:
def format_sdist_header_metadata(data, filename):
"""
Format the metadata of pypi packages stored in email header format.
Currently only used as backup on the wheel (compressed) file format.
"""
description = get_header_description(data)
config_items = python_version_check(data)
attrs = dict(config_items)
name = pop_key(attrs, 'Name', None)
basename = path.basename(filename)
if name is None:
name = basename.split('-')[0]
package_data = {
'name': name,
'summary': pop_key(attrs, 'Summary', None),
'license': pop_key(attrs, 'License', None),
}
release_data = {
'version': pop_key(attrs, 'Version'),
'description': pop_key(attrs, 'Description', description),
'home_page': pop_key(attrs, 'Home-page', None),
}
file_data = {
'basename': basename,
'attrs': {
'packagetype': 'sdist',
'python_version': 'source',
}
}
# Parse multiple keys
deps = []
exts = {}
environments = {}
for key, val in config_items:
if key in ['Requires-Dist', 'Requires']:
name, extras, const, marker, url = parse_specification(val)
name = norm_package_name(name)
specs = const.split(',')
new_specs = []
for spec in specs:
pos = [i for i, c in enumerate(spec) if c in '0123456789']
if pos:
pos = pos[0]
comp, spec_ = spec[:pos].strip(), spec[pos:].strip()
new_specs.append((comp, spec_))
# TODO: All this is to preserve the format used originally
# but is this really needed?
if marker:
if marker.startswith('extra'):
marker = marker.replace('extra', '')
marker = marker.replace('==', '').strip()
ext = marker.rsplit(' ')[-1]
if '"' in ext or "'" in ext:
ext = ext[1:-1]
if ext not in exts:
exts[ext] = [{'name': name, 'specs': new_specs}]
else:
exts[ext].append({'name': name, 'specs': new_specs})
else:
if marker not in environments:
environments[marker] = [{'name': name, 'specs': new_specs}]
else:
environments[marker].append({'name': name, 'specs': new_specs})
else:
deps.append({
'name': name,
'specs': new_specs,
})
deps.sort(key=lambda o: o['name'])
new_exts = []
for key, values in exts.items():
new_exts.append({'name': key, 'depends': values})
new_environments = []
for key, values in environments.items():
new_environments.append({'name': key, 'depends': values})
file_data.update(dependencies={
'has_dep_errors': False,
'depends': deps,
'extras': new_exts,
'environments': new_environments,
})
return package_data, release_data, file_data |
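A worked example of the comparator/version split in the loop above: the index of the first digit separates the operator from the version (the input string is illustrative).
spec = ">=2.0"
pos = [i for i, c in enumerate(spec) if c in '0123456789']
if pos:
    pos = pos[0]
    comp, ver = spec[:pos].strip(), spec[pos:].strip()
    print((comp, ver))  # ('>=', '2.0')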
def report(self, stream):
"""Writes an Xunit-formatted XML file
The file includes a report of test errors and failures.
"""
self.stats['encoding'] = self.encoding
self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+ self.stats['passes'] + self.stats['skipped'])
self.error_report_file.write(
u'<?xml version="1.0" encoding="%(encoding)s"?>'
u'<testsuite name="nosetests" tests="%(total)d" '
u'errors="%(errors)d" failures="%(failures)d" '
u'skip="%(skipped)d">' % self.stats)
self.error_report_file.write(u''.join([self._forceUnicode(e)
for e in self.errorlist]))
self.error_report_file.write(u'</testsuite>')
self.error_report_file.close()
if self.config.verbosity > 1:
stream.writeln("-" * 70)
stream.writeln("XML: %s" % self.error_report_file.name) | Writes an Xunit-formatted XML file
The file includes a report of test errors and failures. | Below is the instruction that describes the task:
### Input:
Writes an Xunit-formatted XML file
The file includes a report of test errors and failures.
### Response:
def report(self, stream):
"""Writes an Xunit-formatted XML file
The file includes a report of test errors and failures.
"""
self.stats['encoding'] = self.encoding
self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+ self.stats['passes'] + self.stats['skipped'])
self.error_report_file.write(
u'<?xml version="1.0" encoding="%(encoding)s"?>'
u'<testsuite name="nosetests" tests="%(total)d" '
u'errors="%(errors)d" failures="%(failures)d" '
u'skip="%(skipped)d">' % self.stats)
self.error_report_file.write(u''.join([self._forceUnicode(e)
for e in self.errorlist]))
self.error_report_file.write(u'</testsuite>')
self.error_report_file.close()
if self.config.verbosity > 1:
stream.writeln("-" * 70)
stream.writeln("XML: %s" % self.error_report_file.name) |
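The testsuite header written above is a plain %-style substitution over the stats dict; a standalone sketch with illustrative numbers:
stats = {'errors': 1, 'failures': 2, 'passes': 7, 'skipped': 0,
         'encoding': 'UTF-8'}
stats['total'] = (stats['errors'] + stats['failures']
                  + stats['passes'] + stats['skipped'])
print(u'<?xml version="1.0" encoding="%(encoding)s"?>'
      u'<testsuite name="nosetests" tests="%(total)d" '
      u'errors="%(errors)d" failures="%(failures)d" '
      u'skip="%(skipped)d">' % stats)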
def delete_record(self, name, recordid, username, password):
''' Delete record '''
#headers = {'key': username, 'secret': password}
req = requests.delete(self.api_server + '/api/' + name + '/' +
str(recordid), auth=(username, password))
return req | Delete record | Below is the instruction that describes the task:
### Input:
Delete record
### Response:
def delete_record(self, name, recordid, username, password):
''' Delete record '''
#headers = {'key': username, 'secret': password}
req = requests.delete(self.api_server + '/api/' + name + '/' +
str(recordid), auth=(username, password))
return req |
def univec(self, databasepath):
"""
Download the UniVec core database
:param databasepath: path to use to save the database
"""
logging.info('Downloading univec database')
databasepath = self.create_database_folder(databasepath, 'univec')
# Set the name of the output file
outputfile = os.path.join(databasepath, 'UniVec_core.tfa')
target_url = 'ftp://ftp.ncbi.nlm.nih.gov/pub/UniVec/UniVec_Core'
self.database_download(output_file=outputfile,
target_url=target_url,
database_path=databasepath)
# Create a copy of the file with a .fasta extension
if os.path.isfile(outputfile):
renamed = os.path.splitext(outputfile)[0] + '.fasta'
shutil.copy(outputfile, renamed) | Download the UniVec core database
:param databasepath: path to use to save the database | Below is the instruction that describes the task:
### Input:
Download the UniVec core database
:param databasepath: path to use to save the database
### Response:
def univec(self, databasepath):
"""
Download the UniVec core database
:param databasepath: path to use to save the database
"""
logging.info('Downloading univec database')
databasepath = self.create_database_folder(databasepath, 'univec')
# Set the name of the output file
outputfile = os.path.join(databasepath, 'UniVec_core.tfa')
target_url = 'ftp://ftp.ncbi.nlm.nih.gov/pub/UniVec/UniVec_Core'
self.database_download(output_file=outputfile,
target_url=target_url,
database_path=databasepath)
# Create a copy of the file with a .fasta extension
if os.path.isfile(outputfile):
renamed = os.path.splitext(outputfile)[0] + '.fasta'
shutil.copy(outputfile, renamed) |
def get_choices(field):
"""
Find choices of a field, whether it has choices or has a queryset.
Args:
field (BoundField): Django form boundfield
Returns:
list: List of choices
"""
empty_label = getattr(field.field, "empty_label", False)
needs_empty_value = False
choices = []
# Data is the choices
if hasattr(field.field, "_choices"):
choices = field.field._choices
# Data is a queryset
elif hasattr(field.field, "_queryset"):
queryset = field.field._queryset
field_name = getattr(field.field, "to_field_name") or "pk"
choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)
# Determine if an empty value is needed
if choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):
needs_empty_value = True
# Delete empty option
if not choices[0][0]:
del choices[0]
# Remove dashed empty choice
if empty_label == BLANK_CHOICE_DASH[0][1]:
empty_label = None
# Add custom empty value
if empty_label or not field.field.required:
if needs_empty_value:
choices.insert(0, ("", empty_label or BLANK_CHOICE_DASH[0][1]))
return choices | Find choices of a field, whether it has choices or has a queryset.
Args:
field (BoundField): Django form boundfield
Returns:
list: List of choices | Below is the instruction that describes the task:
### Input:
Find choices of a field, whether it has choices or has a queryset.
Args:
field (BoundField): Django form boundfield
Returns:
list: List of choices
### Response:
def get_choices(field):
"""
Find choices of a field, whether it has choices or has a queryset.
Args:
field (BoundField): Django form boundfield
Returns:
list: List of choices
"""
empty_label = getattr(field.field, "empty_label", False)
needs_empty_value = False
choices = []
# Data is the choices
if hasattr(field.field, "_choices"):
choices = field.field._choices
# Data is a queryset
elif hasattr(field.field, "_queryset"):
queryset = field.field._queryset
field_name = getattr(field.field, "to_field_name") or "pk"
choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)
# Determine if an empty value is needed
if choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):
needs_empty_value = True
# Delete empty option
if not choices[0][0]:
del choices[0]
# Remove dashed empty choice
if empty_label == BLANK_CHOICE_DASH[0][1]:
empty_label = None
# Add custom empty value
if empty_label or not field.field.required:
if needs_empty_value:
choices.insert(0, ("", empty_label or BLANK_CHOICE_DASH[0][1]))
return choices |
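A hypothetical call inside a configured Django project: ChoiceField stores its choices in _choices, and BLANK_CHOICE_DASH is ('', '---------'), so a non-required field should gain a leading empty option. The form below is illustrative only.
from django import forms

class ColorForm(forms.Form):
    color = forms.ChoiceField(choices=[("r", "Red"), ("g", "Green")],
                              required=False)

print(get_choices(ColorForm()["color"]))
# [('', '---------'), ('r', 'Red'), ('g', 'Green')]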
def read_request_line(self, request_line):
""" Read HTTP-request line
:param request_line: line to parse
for HTTP/0.9 is GET <Request-URI>
for HTTP/1.0 and 1.1 is <METHOD> <Request-URI> HTTP/<HTTP-Version>, where HTTP-Version is 1.0
or 1.1.
for HTTP/2: binary headers are used
"""
request = self.__request_cls.parse_request_line(self, request_line)
protocol_version = self.protocol_version()
if protocol_version == '0.9':
if request.method() != 'GET':
raise Exception('HTTP/0.9 standard violation')
elif protocol_version == '1.0' or protocol_version == '1.1':
pass
elif protocol_version == '2':
pass
else:
raise RuntimeError('Unsupported HTTP-protocol') | Read HTTP-request line
:param request_line: line to parse
for HTTP/0.9 is GET <Request-URI>
for HTTP/1.0 and 1.1 is <METHOD> <Request-URI> HTTP/<HTTP-Version>, where HTTP-Version is 1.0
or 1.1.
for HTTP/2: binary headers are used | Below is the instruction that describes the task:
### Input:
Read HTTP-request line
:param request_line: line to parse
for HTTP/0.9 is GET <Request-URI>
for HTTP/1.0 and 1.1 is <METHOD> <Request-URI> HTTP/<HTTP-Version>, where HTTP-Version is 1.0
or 1.1.
for HTTP/2: binary headers are used
### Response:
def read_request_line(self, request_line):
""" Read HTTP-request line
:param request_line: line to parse
for HTTP/0.9 is GET <Request-URI>
for HTTP/1.0 and 1.1 is <METHOD> <Request-URI> HTTP/<HTTP-Version>, where HTTP-Version is 1.0
or 1.1.
for HTTP/2: binary headers are used
"""
request = self.__request_cls.parse_request_line(self, request_line)
protocol_version = self.protocol_version()
if protocol_version == '0.9':
if request.method() != 'GET':
raise Exception('HTTP/0.9 standard violation')
elif protocol_version == '1.0' or protocol_version == '1.1':
pass
elif protocol_version == '2':
pass
else:
raise RuntimeError('Unsupported HTTP-protocol') |
def default_logging_dict(*loggers: str, **kwargs: Any) -> DictStrAny:
r"""Prepare logging dict suitable with ``logging.config.dictConfig``.
**Usage**::
from logging.config import dictConfig
dictConfig(default_logging_dict('yourlogger'))
:param \*loggers: Enable logging for each logger in sequence.
:param \*\*kwargs: Setup additional logger params via keyword arguments.
"""
kwargs.setdefault('level', 'INFO')
return {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'ignore_errors': {
'()': IgnoreErrorsFilter,
},
},
'formatters': {
'default': {
'format': '%(asctime)s [%(levelname)s:%(name)s] %(message)s',
},
'naked': {
'format': u'%(message)s',
},
},
'handlers': {
'stdout': {
'class': 'logging.StreamHandler',
'filters': ['ignore_errors'],
'formatter': 'default',
'level': 'DEBUG',
'stream': sys.stdout,
},
'stderr': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'WARNING',
'stream': sys.stderr,
},
},
'loggers': {
logger: dict(handlers=['stdout', 'stderr'], **kwargs)
for logger in loggers
},
} | r"""Prepare logging dict suitable with ``logging.config.dictConfig``.
**Usage**::
from logging.config import dictConfig
dictConfig(default_logging_dict('yourlogger'))
:param \*loggers: Enable logging for each logger in sequence.
:param \*\*kwargs: Setup additional logger params via keyword arguments. | Below is the instruction that describes the task:
### Input:
r"""Prepare logging dict suitable with ``logging.config.dictConfig``.
**Usage**::
from logging.config import dictConfig
dictConfig(default_logging_dict('yourlogger'))
:param \*loggers: Enable logging for each logger in sequence.
:param \*\*kwargs: Setup additional logger params via keyword arguments.
### Response:
def default_logging_dict(*loggers: str, **kwargs: Any) -> DictStrAny:
r"""Prepare logging dict suitable with ``logging.config.dictConfig``.
**Usage**::
from logging.config import dictConfig
dictConfig(default_logging_dict('yourlogger'))
:param \*loggers: Enable logging for each logger in sequence.
:param \*\*kwargs: Setup additional logger params via keyword arguments.
"""
kwargs.setdefault('level', 'INFO')
return {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'ignore_errors': {
'()': IgnoreErrorsFilter,
},
},
'formatters': {
'default': {
'format': '%(asctime)s [%(levelname)s:%(name)s] %(message)s',
},
'naked': {
'format': u'%(message)s',
},
},
'handlers': {
'stdout': {
'class': 'logging.StreamHandler',
'filters': ['ignore_errors'],
'formatter': 'default',
'level': 'DEBUG',
'stream': sys.stdout,
},
'stderr': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'WARNING',
'stream': sys.stderr,
},
},
'loggers': {
logger: dict(handlers=['stdout', 'stderr'], **kwargs)
for logger in loggers
},
} |
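Usage sketch, following the docstring's own example; extra keyword arguments (here, level) are forwarded into each logger's config. This assumes default_logging_dict and the module's IgnoreErrorsFilter are importable together.
from logging.config import dictConfig
import logging

dictConfig(default_logging_dict('yourlogger', level='DEBUG'))
logging.getLogger('yourlogger').info('logging configured')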
def get(self, name):
"""Get the vrrp configurations for a single node interface
Args:
name (string): The name of the interface for which vrrp
configurations will be retrieved.
Returns:
A dictionary containing the vrrp configurations on the interface.
Returns None if no vrrp configurations are defined or
if the interface is not configured.
"""
# Validate the interface and vrid are specified
interface = name
if not interface:
raise ValueError("Vrrp.get(): interface must contain a value.")
# Get the config for the interface. Return None if the
# interface is not defined
config = self.get_block('interface %s' % interface)
if config is None:
return config
# Find all occurrences of vrids in this interface and make
# a set of the unique vrid numbers
match = set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M))
if not match:
return None
# Initialize the result dict
result = dict()
for vrid in match:
subd = dict()
# Parse the vrrp configuration for the vrid(s) in the list
subd.update(self._parse_delay_reload(config, vrid))
subd.update(self._parse_description(config, vrid))
subd.update(self._parse_enable(config, vrid))
subd.update(self._parse_ip_version(config, vrid))
subd.update(self._parse_mac_addr_adv_interval(config, vrid))
subd.update(self._parse_preempt(config, vrid))
subd.update(self._parse_preempt_delay_min(config, vrid))
subd.update(self._parse_preempt_delay_reload(config, vrid))
subd.update(self._parse_primary_ip(config, vrid))
subd.update(self._parse_priority(config, vrid))
subd.update(self._parse_secondary_ip(config, vrid))
subd.update(self._parse_timers_advertise(config, vrid))
subd.update(self._parse_track(config, vrid))
subd.update(self._parse_bfd_ip(config, vrid))
result.update({int(vrid): subd})
# If result dict is empty, return None, otherwise return result
return result if result else None | Get the vrrp configurations for a single node interface
Args:
name (string): The name of the interface for which vrrp
configurations will be retrieved.
Returns:
A dictionary containing the vrrp configurations on the interface.
Returns None if no vrrp configurations are defined or
if the interface is not configured. | Below is the instruction that describes the task:
### Input:
Get the vrrp configurations for a single node interface
Args:
name (string): The name of the interface for which vrrp
configurations will be retrieved.
Returns:
A dictionary containing the vrrp configurations on the interface.
Returns None if no vrrp configurations are defined or
if the interface is not configured.
### Response:
def get(self, name):
"""Get the vrrp configurations for a single node interface
Args:
name (string): The name of the interface for which vrrp
configurations will be retrieved.
Returns:
A dictionary containing the vrrp configurations on the interface.
Returns None if no vrrp configurations are defined or
if the interface is not configured.
"""
# Validate the interface and vrid are specified
interface = name
if not interface:
raise ValueError("Vrrp.get(): interface must contain a value.")
# Get the config for the interface. Return None if the
# interface is not defined
config = self.get_block('interface %s' % interface)
if config is None:
return config
# Find all occurrences of vrids in this interface and make
# a set of the unique vrid numbers
match = set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M))
if not match:
return None
# Initialize the result dict
result = dict()
for vrid in match:
subd = dict()
# Parse the vrrp configuration for the vrid(s) in the list
subd.update(self._parse_delay_reload(config, vrid))
subd.update(self._parse_description(config, vrid))
subd.update(self._parse_enable(config, vrid))
subd.update(self._parse_ip_version(config, vrid))
subd.update(self._parse_mac_addr_adv_interval(config, vrid))
subd.update(self._parse_preempt(config, vrid))
subd.update(self._parse_preempt_delay_min(config, vrid))
subd.update(self._parse_preempt_delay_reload(config, vrid))
subd.update(self._parse_primary_ip(config, vrid))
subd.update(self._parse_priority(config, vrid))
subd.update(self._parse_secondary_ip(config, vrid))
subd.update(self._parse_timers_advertise(config, vrid))
subd.update(self._parse_track(config, vrid))
subd.update(self._parse_bfd_ip(config, vrid))
result.update({int(vrid): subd})
# If result dict is empty, return None, otherwise return result
return result if result else None |
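The vrid harvesting relies on a single multiline regex; here is a standalone check against an illustrative config block:
import re

config = """interface Vlan50
   vrrp 10 priority 200
   no vrrp 20 preempt
   vrrp 10 ip 10.0.0.1
"""
print(set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M)))
# {'10', '20'} (set ordering may vary)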
def field(self, field_name, boost=1, extractor=None):
"""Adds a field to the list of document fields that will be indexed.
Every document being indexed should have this field. None values for
this field in indexed documents will not cause errors but will limit
the chance of that document being retrieved by searches.
All fields should be added before adding documents to the index. Adding
fields after a document has been indexed will have no effect on already
indexed documents.
Fields can be boosted at build time. This allows terms within that
field to have more importance on search results. Use a field boost to
specify that matches within one field are more important that other
fields.
Args:
field_name (str): Name of the field to be added, must not include
a forward slash '/'.
boost (int): Optional boost factor to apply to field.
extractor (callable): Optional function to extract a field from
the document.
Raises:
ValueError: If the field name contains a `/`.
"""
if "/" in field_name:
raise ValueError("Field {} contains illegal character `/`".format(field_name))
self._fields[field_name] = Field(field_name, boost, extractor) | Adds a field to the list of document fields that will be indexed.
Every document being indexed should have this field. None values for
this field in indexed documents will not cause errors but will limit
the chance of that document being retrieved by searches.
All fields should be added before adding documents to the index. Adding
fields after a document has been indexed will have no effect on already
indexed documents.
Fields can be boosted at build time. This allows terms within that
field to have more importance on search results. Use a field boost to
specify that matches within one field are more important than other
fields.
Args:
field_name (str): Name of the field to be added, must not include
a forward slash '/'.
boost (int): Optional boost factor to apply to field.
extractor (callable): Optional function to extract a field from
the document.
Raises:
ValueError: If the field name contains a `/`. | Below is the instruction that describes the task:
### Input:
Adds a field to the list of document fields that will be indexed.
Every document being indexed should have this field. None values for
this field in indexed documents will not cause errors but will limit
the chance of that document being retrieved by searches.
All fields should be added before adding documents to the index. Adding
fields after a document has been indexed will have no effect on already
indexed documents.
Fields can be boosted at build time. This allows terms within that
field to have more importance on search results. Use a field boost to
specify that matches within one field are more important than other
fields.
Args:
field_name (str): Name of the field to be added, must not include
a forward slash '/'.
boost (int): Optional boost factor to apply to field.
extractor (callable): Optional function to extract a field from
the document.
Raises:
ValueError: If the field name contains a `/`.
### Response:
def field(self, field_name, boost=1, extractor=None):
"""Adds a field to the list of document fields that will be indexed.
Every document being indexed should have this field. None values for
this field in indexed documents will not cause errors but will limit
the chance of that document being retrieved by searches.
All fields should be added before adding documents to the index. Adding
fields after a document has been indexed will have no effect on already
indexed documents.
Fields can be boosted at build time. This allows terms within that
field to have more importance on search results. Use a field boost to
specify that matches within one field are more important than other
fields.
Args:
field_name (str): Name of the field to be added, must not include
a forward slash '/'.
boost (int): Optional boost factor to apply to field.
extractor (callable): Optional function to extract a field from
the document.
Raises:
ValueError: If the field name contains a `/`.
"""
if "/" in field_name:
raise ValueError("Field {} contains illegal character `/`".format(field_name))
self._fields[field_name] = Field(field_name, boost, extractor) |
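A hypothetical build-time setup against lunr.py's Builder, matching the docstring's advice to register fields (and boosts) before indexing documents; the import path is an assumption.
from lunr.builder import Builder

builder = Builder()
builder.field("title", boost=10)                 # title matches weigh more
builder.field("body")
builder.field("tags", extractor=lambda doc: doc.get("tags"))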
def call_task_fn(self):
"""Call the function attached to the task."""
if not self.fn:
return self.log_finished()
future = asyncio.Future()
future.add_done_callback(lambda x: self.log_finished())
if inspect.iscoroutinefunction(self.fn):
f = asyncio.ensure_future(self.fn())
f.add_done_callback(lambda x: self.bind_end(x.result(), future))
else:
self.bind_end(self.fn(), future)
return future | Call the function attached to the task. | Below is the instruction that describes the task:
### Input:
Call the function attached to the task.
### Response:
def call_task_fn(self):
"""Call the function attached to the task."""
if not self.fn:
return self.log_finished()
future = asyncio.Future()
future.add_done_callback(lambda x: self.log_finished())
if inspect.iscoroutinefunction(self.fn):
f = asyncio.ensure_future(self.fn())
f.add_done_callback(lambda x: self.bind_end(x.result(), future))
else:
self.bind_end(self.fn(), future)
return future |
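The sync/async dispatch above hinges on inspect.iscoroutinefunction; a quick demonstration:
import inspect

async def fetch():
    return 42

def compute():
    return 42

print(inspect.iscoroutinefunction(fetch))    # True
print(inspect.iscoroutinefunction(compute))  # False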
def createMenu( self ):
"""
Creates a new menu with the given name.
"""
name, accepted = QInputDialog.getText( self,
'Create Menu',
'Name: ')
if ( accepted ):
self.addMenuItem(self.createMenuItem(name),
self.uiMenuTREE.currentItem()) | Creates a new menu with the given name. | Below is the instruction that describes the task:
### Input:
Creates a new menu with the given name.
### Response:
def createMenu( self ):
"""
Creates a new menu with the given name.
"""
name, accepted = QInputDialog.getText( self,
'Create Menu',
'Name: ')
if ( accepted ):
self.addMenuItem(self.createMenuItem(name),
self.uiMenuTREE.currentItem()) |
def getGroup(self, networkId, groupNodeId, verbose=None):
"""
Returns the group specified by the `groupNodeId` and `networkId` parameters.
:param networkId: SUID of the Network
:param groupNodeId: SUID of the Node representing the Group
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/groups/'+str(groupNodeId)+'', method="GET", verbose=verbose, parse_params=False)
return response | Returns the group specified by the `groupNodeId` and `networkId` parameters.
:param networkId: SUID of the Network
:param groupNodeId: SUID of the Node representing the Group
:param verbose: print more
:returns: 200: successful operation | Below is the instruction that describes the task:
### Input:
Returns the group specified by the `groupNodeId` and `networkId` parameters.
:param networkId: SUID of the Network
:param groupNodeId: SUID of the Node representing the Group
:param verbose: print more
:returns: 200: successful operation
### Response:
def getGroup(self, networkId, groupNodeId, verbose=None):
"""
Returns the group specified by the `groupNodeId` and `networkId` parameters.
:param networkId: SUID of the Network
:param groupNodeId: SUID of the Node representing the Group
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/groups/'+str(groupNodeId)+'', method="GET", verbose=verbose, parse_params=False)
return response |
def addinnode(self, otherplus, node, objectname):
"""add an item to the node.
example: add a new zone to the element 'ZONE' """
# do a test for unique object here
newelement = otherplus.dt[node.upper()] | add an item to the node.
example: add a new zone to the element 'ZONE' | Below is the instruction that describes the task:
### Input:
add an item to the node.
example: add a new zone to the element 'ZONE'
### Response:
def addinnode(self, otherplus, node, objectname):
"""add an item to the node.
example: add a new zone to the element 'ZONE' """
# do a test for unique object here
newelement = otherplus.dt[node.upper()] |
def filelines(fname,strip=False):
'''read lines from a file into lines...optional strip'''
with open(fname,'r') as f:
lines = f.readlines();
if strip:
lines[:] = [line.strip() for line in lines]
return lines; | read lines from a file into lines...optional strip | Below is the instruction that describes the task:
### Input:
read lines from a file into lines...optional strip
### Response:
def filelines(fname,strip=False):
'''read lines from a file into lines...optional strip'''
with open(fname,'r') as f:
lines = f.readlines();
if strip:
lines[:] = [line.strip() for line in lines]
return lines; |
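Usage sketch for filelines (it writes a scratch file first); note that without strip=True the trailing newlines are kept.
with open('demo.txt', 'w') as f:
    f.write('alpha\n  beta  \n')

print(filelines('demo.txt'))              # ['alpha\n', '  beta  \n']
print(filelines('demo.txt', strip=True))  # ['alpha', 'beta']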
def message(self, body, room_id, style='text'):
''' Send a message to the given room '''
# TODO Automatically detect body format ?
path = 'rooms/message'
data = {
'room_id': room_id,
'message': body,
'from': self.name,
'notify': 1,
'message_format': style,
'color': self.bg_color
}
log.info('sending message to hipchat', message=body, room=room_id)
feedback = self._api_call(path, data, requests.post)
log.debug(feedback)
return feedback | Send a message to the given room | Below is the instruction that describes the task:
### Input:
Send a message to the given room
### Response:
def message(self, body, room_id, style='text'):
''' Send a message to the given room '''
# TODO Automatically detect body format ?
path = 'rooms/message'
data = {
'room_id': room_id,
'message': body,
'from': self.name,
'notify': 1,
'message_format': style,
'color': self.bg_color
}
log.info('sending message to hipchat', message=body, room=room_id)
feedback = self._api_call(path, data, requests.post)
log.debug(feedback)
return feedback |
def _prepare_io_handler(self, handler):
"""Call the `interfaces.IOHandler.prepare` method and
remove the handler from the unprepared handler list when done.
"""
logger.debug(" preparing handler: {0!r}".format(handler))
self._unprepared_pending.discard(handler)
ret = handler.prepare()
logger.debug(" prepare result: {0!r}".format(ret))
if isinstance(ret, HandlerReady):
del self._unprepared_handlers[handler]
prepared = True
elif isinstance(ret, PrepareAgain):
if ret.timeout == 0:
tag = glib.idle_add(self._prepare_io_handler_cb, handler)
self._prepare_sources[handler] = tag
elif ret.timeout is not None:
timeout = ret.timeout
timeout = int(timeout * 1000)
if not timeout:
timeout = 1
tag = glib.timeout_add(timeout, self._prepare_io_handler_cb,
handler)
self._prepare_sources[handler] = tag
else:
self._unprepared_pending.add(handler)
prepared = False
else:
raise TypeError("Unexpected result type from prepare()")
return prepared | Call the `interfaces.IOHandler.prepare` method and
remove the handler from the unprepared handler list when done. | Below is the instruction that describes the task:
### Input:
Call the `interfaces.IOHandler.prepare` method and
remove the handler from the unprepared handler list when done.
### Response:
def _prepare_io_handler(self, handler):
"""Call the `interfaces.IOHandler.prepare` method and
remove the handler from the unprepared handler list when done.
"""
logger.debug(" preparing handler: {0!r}".format(handler))
self._unprepared_pending.discard(handler)
ret = handler.prepare()
logger.debug(" prepare result: {0!r}".format(ret))
if isinstance(ret, HandlerReady):
del self._unprepared_handlers[handler]
prepared = True
elif isinstance(ret, PrepareAgain):
if ret.timeout == 0:
tag = glib.idle_add(self._prepare_io_handler_cb, handler)
self._prepare_sources[handler] = tag
elif ret.timeout is not None:
timeout = ret.timeout
timeout = int(timeout * 1000)
if not timeout:
timeout = 1
tag = glib.timeout_add(timeout, self._prepare_io_handler_cb,
handler)
self._prepare_sources[handler] = tag
else:
self._unprepared_pending.add(handler)
prepared = False
else:
raise TypeError("Unexpected result type from prepare()")
return prepared |
def db_stats(self):
"""Get database statistics.
Returns:
DBStats: Total clicks and links statistics.
Raises:
requests.exceptions.HTTPError: Generic HTTP Error
"""
data = dict(action='db-stats')
jsondata = self._api_request(params=data)
stats = DBStats(total_clicks=int(jsondata['db-stats']['total_clicks']),
total_links=int(jsondata['db-stats']['total_links']))
return stats | Get database statistics.
Returns:
DBStats: Total clicks and links statistics.
Raises:
requests.exceptions.HTTPError: Generic HTTP Error | Below is the instruction that describes the task:
### Input:
Get database statistics.
Returns:
DBStats: Total clicks and links statistics.
Raises:
requests.exceptions.HTTPError: Generic HTTP Error
### Response:
def db_stats(self):
"""Get database statistics.
Returns:
DBStats: Total clicks and links statistics.
Raises:
requests.exceptions.HTTPError: Generic HTTP Error
"""
data = dict(action='db-stats')
jsondata = self._api_request(params=data)
stats = DBStats(total_clicks=int(jsondata['db-stats']['total_clicks']),
total_links=int(jsondata['db-stats']['total_links']))
return stats |
def get_parse(self, show=True, proxy=None, timeout=0):
"""
GET MediaWiki:API action=parse request
https://en.wikipedia.org/w/api.php?action=help&modules=parse
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- image: <dict> {parse-image, parse-cover}
- infobox: <dict> Infobox data as python dictionary
- iwlinks: <list> interwiki links
- pageid: <int> Wikipedia database ID
- parsetree: <str> XML parse tree
- requests: list of request actions made
- wikibase: <str> Wikidata entity ID or wikidata URL
- wikitext: <str> raw wikitext URL
"""
if not self.params.get('title') and not self.params.get('pageid'):
raise ValueError("get_parse needs title or pageid")
self._get('parse', show, proxy, timeout)
return self | GET MediaWiki:API action=parse request
https://en.wikipedia.org/w/api.php?action=help&modules=parse
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- image: <dict> {parse-image, parse-cover}
- infobox: <dict> Infobox data as python dictionary
- iwlinks: <list> interwiki links
- pageid: <int> Wikipedia database ID
- parsetree: <str> XML parse tree
- requests: list of request actions made
- wikibase: <str> Wikidata entity ID or wikidata URL
- wikitext: <str> raw wikitext URL | Below is the instruction that describes the task:
### Input:
GET MediaWiki:API action=parse request
https://en.wikipedia.org/w/api.php?action=help&modules=parse
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- image: <dict> {parse-image, parse-cover}
- infobox: <dict> Infobox data as python dictionary
- iwlinks: <list> interwiki links
- pageid: <int> Wikipedia database ID
- parsetree: <str> XML parse tree
- requests: list of request actions made
- wikibase: <str> Wikidata entity ID or wikidata URL
- wikitext: <str> raw wikitext URL
### Response:
def get_parse(self, show=True, proxy=None, timeout=0):
"""
GET MediaWiki:API action=parse request
https://en.wikipedia.org/w/api.php?action=help&modules=parse
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- image: <dict> {parse-image, parse-cover}
- infobox: <dict> Infobox data as python dictionary
- iwlinks: <list> interwiki links
- pageid: <int> Wikipedia database ID
- parsetree: <str> XML parse tree
- requests: list of request actions made
- wikibase: <str> Wikidata entity ID or wikidata URL
- wikitext: <str> raw wikitext URL
"""
if not self.params.get('title') and not self.params.get('pageid'):
raise ValueError("get_parse needs title or pageid")
self._get('parse', show, proxy, timeout)
return self |
def get_addresses_details(address_list, coin_symbol='btc', txn_limit=None, api_key=None,
before_bh=None, after_bh=None, unspent_only=False, show_confidence=False,
confirmations=0, include_script=False):
'''
Batch version of get_address_details method
'''
for address in address_list:
assert is_valid_address_for_coinsymbol(
b58_address=address,
coin_symbol=coin_symbol), address
assert isinstance(show_confidence, bool), show_confidence
kwargs = dict(addrs=';'.join([str(addr) for addr in address_list]))
url = make_url(coin_symbol, **kwargs)
params = {}
if txn_limit:
params['limit'] = txn_limit
if api_key:
params['token'] = api_key
if before_bh:
params['before'] = before_bh
if after_bh:
params['after'] = after_bh
if confirmations:
params['confirmations'] = confirmations
if unspent_only:
params['unspentOnly'] = 'true'
if show_confidence:
params['includeConfidence'] = 'true'
if include_script:
params['includeScript'] = 'true'
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
r = get_valid_json(r)
return [_clean_tx(response_dict=d) for d in r] | Batch version of get_address_details method | Below is the instruction that describes the task:
### Input:
Batch version of get_address_details method
### Response:
def get_addresses_details(address_list, coin_symbol='btc', txn_limit=None, api_key=None,
before_bh=None, after_bh=None, unspent_only=False, show_confidence=False,
confirmations=0, include_script=False):
'''
Batch version of get_address_details method
'''
for address in address_list:
assert is_valid_address_for_coinsymbol(
b58_address=address,
coin_symbol=coin_symbol), address
assert isinstance(show_confidence, bool), show_confidence
kwargs = dict(addrs=';'.join([str(addr) for addr in address_list]))
url = make_url(coin_symbol, **kwargs)
params = {}
if txn_limit:
params['limit'] = txn_limit
if api_key:
params['token'] = api_key
if before_bh:
params['before'] = before_bh
if after_bh:
params['after'] = after_bh
if confirmations:
params['confirmations'] = confirmations
if unspent_only:
params['unspentOnly'] = 'true'
if show_confidence:
params['includeConfidence'] = 'true'
if include_script:
params['includeScript'] = 'true'
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
r = get_valid_json(r)
return [_clean_tx(response_dict=d) for d in r] |
def join_all(domain, *parts):
"""
Join all url components.
Example::
>>> join_all("https://www.apple.com", "iphone")
https://www.apple.com/iphone
:param domain: Domain parts, example: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url
"""
l = list()
if domain.endswith("/"):
domain = domain[:-1]
l.append(domain)
for part in parts:
for i in part.split("/"):
if i.strip():
l.append(i)
url = "/".join(l)
return url | Join all url components.
Example::
>>> join_all("https://www.apple.com", "iphone")
https://www.apple.com/iphone
:param domain: Domain parts, example: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url | Below is the instruction that describes the task:
### Input:
Join all url components.
Example::
>>> join_all("https://www.apple.com", "iphone")
https://www.apple.com/iphone
:param domain: Domain parts, example: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url
### Response:
def join_all(domain, *parts):
"""
Join all url components.
Example::
>>> join_all("https://www.apple.com", "iphone")
https://www.apple.com/iphone
:param domain: Domain parts, example: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url
"""
l = list()
if domain.endswith("/"):
domain = domain[:-1]
l.append(domain)
for part in parts:
for i in part.split("/"):
if i.strip():
l.append(i)
url = "/".join(l)
return url |
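Two illustrative calls showing how the slash handling above behaves (expected outputs shown as comments):

print(join_all("https://www.python.org/", "doc", "py27"))
# -> https://www.python.org/doc/py27
print(join_all("https://www.apple.com", "/iphone/", "specs"))
# -> https://www.apple.com/iphone/specs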
def writejar(self, jar):
"""Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.
:param string jar: the path to the pre-existing jar to graft into this jar
"""
if not jar or not isinstance(jar, string_types):
raise ValueError('The jar path must be a non-empty string')
self._jars.append(jar) | Schedules all entries from the given ``jar`` to be added to this jar, save for the manifest.
:param string jar: the path to the pre-existing jar to graft into this jar | Below is the instruction that describes the task:
### Input:
Schedules all entries from the given ``jar`` to be added to this jar, save for the manifest.
:param string jar: the path to the pre-existing jar to graft into this jar
### Response:
def writejar(self, jar):
"""Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.
:param string jar: the path to the pre-existing jar to graft into this jar
"""
if not jar or not isinstance(jar, string_types):
raise ValueError('The jar path must be a non-empty string')
self._jars.append(jar) |
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy, *args, **kwargs):
"""Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_activity_admin()`` and ``supports_visible_federation()`` are ``true``.*
"""
if not objective_bank_id:
raise NullArgument
if not self.supports_activity_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ActivityAdminSession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_activity_admin()`` and ``supports_visible_federation()`` are ``true``.* | Below is the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_activity_admin()`` and ``supports_visible_federation()`` are ``true``.*
### Response:
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy, *args, **kwargs):
"""Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_activity_admin()`` and ``supports_visible_federation()`` are ``true``.*
"""
if not objective_bank_id:
raise NullArgument
if not self.supports_activity_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ActivityAdminSession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session |
def remove_core_element(self, model):
"""Remove respective core element of handed global variable name
:param str model: String that is the key/gv_name of core element which should be removed
:return:
"""
gv_name = model
if self.global_variable_is_editable(gv_name, "Deletion"):
try:
self.model.global_variable_manager.delete_variable(gv_name)
except AttributeError as e:
logger.warning("The respective global variable '{1}' couldn't be removed. -> {0}"
"".format(e, model)) | Remove respective core element of handed global variable name
:param str model: String that is the key/gv_name of core element which should be removed
:return: | Below is the instruction that describes the task:
### Input:
Remove the core element for the given global variable name
:param str model: String that is the key/gv_name of core element which should be removed
:return:
### Response:
def remove_core_element(self, model):
"""Remove respective core element of handed global variable name
:param str model: String that is the key/gv_name of core element which should be removed
:return:
"""
gv_name = model
if self.global_variable_is_editable(gv_name, "Deletion"):
try:
self.model.global_variable_manager.delete_variable(gv_name)
except AttributeError as e:
logger.warning("The respective global variable '{1}' couldn't be removed. -> {0}"
"".format(e, model)) |
def which(program, path=None):
"""
Returns the full path of shell commands.
Replicates the functionality of system which (1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.
Examples:
>>> system.which("ls")
"/bin/ls"
>>> system.which("/bin/ls")
"/bin/ls"
>>> system.which("not-a-real-command")
None
>>> system.which("ls", path=("/usr/bin", "/bin"))
"/bin/ls"
Arguments:
program (str): The name of the program to look for. Can
be an absolute path.
path (sequence of str, optional): A list of directories to
look for the program in. Default value is system $PATH.
Returns:
str: Full path to program if found, else None.
"""
# If path is not given, read the $PATH environment variable.
path = path or os.environ["PATH"].split(os.pathsep)
abspath = True if os.path.split(program)[0] else False
if abspath:
if fs.isexe(program):
return program
else:
for directory in path:
# De-quote directories.
directory = directory.strip('"')
exe_file = os.path.join(directory, program)
if fs.isexe(exe_file):
return exe_file
return None | Returns the full path of shell commands.
Replicates the functionality of system which (1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.
Examples:
>>> system.which("ls")
"/bin/ls"
>>> system.which("/bin/ls")
"/bin/ls"
>>> system.which("not-a-real-command")
None
>>> system.which("ls", path=("/usr/bin", "/bin"))
"/bin/ls"
Arguments:
program (str): The name of the program to look for. Can
be an absolute path.
path (sequence of str, optional): A list of directories to
look for the program in. Default value is system $PATH.
Returns:
str: Full path to program if found, else None. | Below is the instruction that describes the task:
### Input:
Returns the full path of shell commands.
Replicates the functionality of system which (1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.
Examples:
>>> system.which("ls")
"/bin/ls"
>>> system.which("/bin/ls")
"/bin/ls"
>>> system.which("not-a-real-command")
None
>>> system.which("ls", path=("/usr/bin", "/bin"))
"/bin/ls"
Arguments:
program (str): The name of the program to look for. Can
be an absolute path.
path (sequence of str, optional): A list of directories to
look for the program in. Default value is system $PATH.
Returns:
str: Full path to program if found, else None.
### Response:
def which(program, path=None):
"""
Returns the full path of shell commands.
Replicates the functionality of system which (1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.
Examples:
>>> system.which("ls")
"/bin/ls"
>>> system.which("/bin/ls")
"/bin/ls"
>>> system.which("not-a-real-command")
None
>>> system.which("ls", path=("/usr/bin", "/bin"))
"/bin/ls"
Arguments:
program (str): The name of the program to look for. Can
be an absolute path.
path (sequence of str, optional): A list of directories to
look for the program in. Default value is system $PATH.
Returns:
str: Full path to program if found, else None.
"""
# If path is not given, read the $PATH environment variable.
path = path or os.environ["PATH"].split(os.pathsep)
abspath = True if os.path.split(program)[0] else False
if abspath:
if fs.isexe(program):
return program
else:
for directory in path:
# De-quote directories.
directory = directory.strip('"')
exe_file = os.path.join(directory, program)
if fs.isexe(exe_file):
return exe_file
return None |
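A small usage sketch calling the function defined above (it relies on the module's fs.isexe helper; results depend on the host system):

ls_path = which("ls")                            # search the $PATH directories
custom = which("ls", path=("/usr/bin", "/bin"))  # search an explicit list
if ls_path is None:
    print("ls not found on this system")
else:
    print("found:", ls_path)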
def get_document_field(instance):
"""
Returns which field the search index has marked as its
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name | Returns which field the search index has marked as its
`document=True` field. | Below is the instruction that describes the task:
### Input:
Returns which field the search index has marked as its
`document=True` field.
### Response:
def get_document_field(instance):
"""
Returns which field the search index has marked as its
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name |
def pem2der(pem_string):
"""Convert PEM string to DER format"""
# Extract the base64 payload between the first '-----\n' and the 2nd-to-last '-----'.
pem_string = pem_string.replace(b"\r", b"")
first_idx = pem_string.find(b"-----\n") + 6
if pem_string.find(b"-----BEGIN", first_idx) != -1:
raise Exception("pem2der() expects only one PEM-encoded object")
last_idx = pem_string.rfind(b"-----", 0, pem_string.rfind(b"-----"))
base64_string = pem_string[first_idx:last_idx]
base64_string = base64_string.replace(b"\n", b"")
der_string = base64.b64decode(base64_string)
return der_string | Convert PEM string to DER format | Below is the instruction that describes the task:
### Input:
Convert PEM string to DER format
### Response:
def pem2der(pem_string):
"""Convert PEM string to DER format"""
# Extract the base64 payload between the first '-----\n' and the 2nd-to-last '-----'.
pem_string = pem_string.replace(b"\r", b"")
first_idx = pem_string.find(b"-----\n") + 6
if pem_string.find(b"-----BEGIN", first_idx) != -1:
raise Exception("pem2der() expects only one PEM-encoded object")
last_idx = pem_string.rfind(b"-----", 0, pem_string.rfind(b"-----"))
base64_string = pem_string[first_idx:last_idx]
base64_string = base64_string.replace(b"\n", b"")
der_string = base64.b64decode(base64_string)
return der_string |
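A round-trip sketch for the function above; the payload is fabricated demo bytes rather than a real certificate, since only the PEM framing matters here:

import base64

payload = b"not a real certificate, just demo bytes"
pem = (b"-----BEGIN CERTIFICATE-----\n"
       + base64.b64encode(payload) + b"\n"
       + b"-----END CERTIFICATE-----\n")
assert pem2der(pem) == payload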
def list_datastores_full(kwargs=None, call=None):
'''
List all the datastores for this VMware environment, with extra information
CLI Example:
.. code-block:: bash
salt-cloud -f list_datastores_full my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_datastores_full function must be called with '
'-f or --function.'
)
return {'Datastores': salt.utils.vmware.list_datastores_full(_get_si())} | List all the datastores for this VMware environment, with extra information
CLI Example:
.. code-block:: bash
salt-cloud -f list_datastores_full my-vmware-config | Below is the instruction that describes the task:
### Input:
List all the datastores for this VMware environment, with extra information
CLI Example:
.. code-block:: bash
salt-cloud -f list_datastores_full my-vmware-config
### Response:
def list_datastores_full(kwargs=None, call=None):
'''
List all the datastores for this VMware environment, with extra information
CLI Example:
.. code-block:: bash
salt-cloud -f list_datastores_full my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_datastores_full function must be called with '
'-f or --function.'
)
return {'Datastores': salt.utils.vmware.list_datastores_full(_get_si())} |
def cancel_job(self, job_resource_name: str):
"""Cancels the given job.
See also the cancel method on EngineJob.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
"""
self.service.projects().programs().jobs().cancel(
name=job_resource_name, body={}).execute() | Cancels the given job.
See also the cancel method on EngineJob.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`. | Below is the instruction that describes the task:
### Input:
Cancels the given job.
See also the cancel method on EngineJob.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
### Response:
def cancel_job(self, job_resource_name: str):
"""Cancels the given job.
See also the cancel method on EngineJob.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
"""
self.service.projects().programs().jobs().cancel(
name=job_resource_name, body={}).execute() |
def get_print_list():
"""
get_print_list
"""
profiler = start_profile()
meth1()
meth2()
meth3()
meth4()
return end_profile(profiler, returnvalue=True) | get_print_list | Below is the the instruction that describes the task:
### Input:
get_print_list
### Response:
def get_print_list():
"""
get_print_list
"""
profiler = start_profile()
meth1()
meth2()
meth3()
meth4()
return end_profile(profiler, returnvalue=True) |
async def create(
cls, node: Union[Node, str],
cache_device: Union[BlockDevice, Partition]):
"""
Create a BcacheCacheSet on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition`
"""
params = {}
if isinstance(node, str):
params['system_id'] = node
elif isinstance(node, Node):
params['system_id'] = node.system_id
else:
raise TypeError(
'node must be a Node or str, not %s' % (
type(node).__name__))
if isinstance(cache_device, BlockDevice):
params['cache_device'] = cache_device.id
elif isinstance(cache_device, Partition):
params['cache_partition'] = cache_device.id
else:
raise TypeError(
'cache_device must be a BlockDevice or Partition, not %s' % (
type(cache_device).__name__))
return cls._object(await cls._handler.create(**params)) | Create a BcacheCacheSet on a Node.
:param node: Node to create the cache set on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition` | Below is the instruction that describes the task:
### Input:
Create a BcacheCacheSet on a Node.
:param node: Node to create the cache set on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition`
### Response:
async def create(
cls, node: Union[Node, str],
cache_device: Union[BlockDevice, Partition]):
"""
Create a BcacheCacheSet on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition`
"""
params = {}
if isinstance(node, str):
params['system_id'] = node
elif isinstance(node, Node):
params['system_id'] = node.system_id
else:
raise TypeError(
'node must be a Node or str, not %s' % (
type(node).__name__))
if isinstance(cache_device, BlockDevice):
params['cache_device'] = cache_device.id
elif isinstance(cache_device, Partition):
params['cache_partition'] = cache_device.id
else:
raise TypeError(
'cache_device must be a BlockDevice or Partition, not %s' % (
type(cache_device).__name__))
return cls._object(await cls._handler.create(**params)) |
def ndim(self):
"""If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays."""
if self.is_raster():
return {
FeatureType.DATA: 4,
FeatureType.MASK: 4,
FeatureType.SCALAR: 2,
FeatureType.LABEL: 2,
FeatureType.DATA_TIMELESS: 3,
FeatureType.MASK_TIMELESS: 3,
FeatureType.SCALAR_TIMELESS: 1,
FeatureType.LABEL_TIMELESS: 1
}[self]
return None | If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays. | Below is the instruction that describes the task:
### Input:
If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays.
### Response:
def ndim(self):
"""If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays."""
if self.is_raster():
return {
FeatureType.DATA: 4,
FeatureType.MASK: 4,
FeatureType.SCALAR: 2,
FeatureType.LABEL: 2,
FeatureType.DATA_TIMELESS: 3,
FeatureType.MASK_TIMELESS: 3,
FeatureType.SCALAR_TIMELESS: 1,
FeatureType.LABEL_TIMELESS: 1
}[self]
return None |
def each_object_id(collection):
"""Yields each object ID in the given ``collection``.
The objects are not loaded."""
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
match = regex.match(r'.+/(.+)\.%s$' % _ext, path)
yield match.groups()[0] | Yields each object ID in the given ``collection``.
The objects are not loaded. | Below is the instruction that describes the task:
### Input:
Yields each object ID in the given ``collection``.
The objects are not loaded.
### Response:
def each_object_id(collection):
"""Yields each object ID in the given ``collection``.
The objects are not loaded."""
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
match = regex.match(r'.+/(.+)\.%s$' % _ext, path)
yield match.groups()[0] |
def apply_cast(scope, input_name, output_name, container, operator_name=None, to=None):
'''
:param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
'''
name = _create_name_or_use_existing_one(scope, 'Cast', operator_name)
attrs = {'name': name}
d = onnx_proto.TensorProto.DataType.DESCRIPTOR
allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()}
if to not in allowed_type_name_and_type_enum_pairs:
raise ValueError('Attribute "to" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys())
if container.target_opset < 9:
if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
raise ValueError('Attribute "to" cannot correspond to a String or Complex TensorProto type.')
if container.target_opset < 6:
# Convert enum to string, for example, TensorProto.INT64 to 'INT64'
attrs['to'] = allowed_type_name_and_type_enum_pairs[to]
op_version = 1
else:
# Enum, for example, TensorProto.INT64
attrs['to'] = to
op_version = 6
else:
# Enum value, for example, TensorProto.INT64
# String casting is supported in opset 9
if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
raise ValueError('Attribute "to" cannot correspond to a Complex TensorProto type.')
attrs['to'] = to
op_version = 9
container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs) | :param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64. | Below is the instruction that describes the task:
### Input:
:param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
### Response:
def apply_cast(scope, input_name, output_name, container, operator_name=None, to=None):
'''
:param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
'''
name = _create_name_or_use_existing_one(scope, 'Cast', operator_name)
attrs = {'name': name}
d = onnx_proto.TensorProto.DataType.DESCRIPTOR
allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()}
if to not in allowed_type_name_and_type_enum_pairs:
raise ValueError('Attribute "to" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys())
if container.target_opset < 9:
if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
raise ValueError('Attribute "to" cannot correspond to a String or Complex TensorProto type.')
if container.target_opset < 6:
# Convert enum to string, for example, TensorProto.INT64 to 'INT64'
attrs['to'] = allowed_type_name_and_type_enum_pairs[to]
op_version = 1
else:
# Enum, for example, TensorProto.INT64
attrs['to'] = to
op_version = 6
else:
# Enum value, for example, TensorProto.INT64
# String casting is supported in opset 9
if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
raise ValueError('Attribute "to" cannot correspond to a Complex TensorProto type.')
attrs['to'] = to
op_version = 9
container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs) |
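The enum-to-name lookup used above can be reproduced standalone; this sketch assumes the onnx package is installed:

from onnx import TensorProto

d = TensorProto.DataType.DESCRIPTOR
number_to_name = {v.number: k for k, v in d.values_by_name.items()}
print(number_to_name[TensorProto.INT64])  # -> 'INT64'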
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list | Does the same as 'headers' except it is returned as a list. | Below is the instruction that describes the task:
### Input:
Does the same as 'headers' except it is returned as a list.
### Response:
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list |
def parse(self, arguments):
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in the 'value' attribute
self.value = values | Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item. | Below is the instruction that describes the task:
### Input:
Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
### Response:
def parse(self, arguments):
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in the 'value' attribute
self.value = values |
def authorized(self, request_token):
"""Create a verifier for an user authorized client"""
verifier = generate_token(length=self.verifier_length[1])
self.save_verifier(request_token, verifier)
response = [
(u'oauth_token', request_token),
(u'oauth_verifier', verifier)
]
callback = self.get_callback(request_token)
return redirect(add_params_to_uri(callback, response)) | Create a verifier for a user authorized client | Below is the instruction that describes the task:
### Input:
Create a verifier for a user authorized client
### Response:
def authorized(self, request_token):
"""Create a verifier for an user authorized client"""
verifier = generate_token(length=self.verifier_length[1])
self.save_verifier(request_token, verifier)
response = [
(u'oauth_token', request_token),
(u'oauth_verifier', verifier)
]
callback = self.get_callback(request_token)
return redirect(add_params_to_uri(callback, response)) |
def ReadHuntCounters(self, hunt_id):
"""Reads hunt counters."""
num_clients = self.CountHuntFlows(hunt_id)
num_successful_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY)
num_failed_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.FAILED_FLOWS_ONLY)
num_clients_with_results = len(
set(r[0].client_id
for r in self.flow_results.values()
if r and r[0].hunt_id == hunt_id))
num_crashed_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.CRASHED_FLOWS_ONLY)
num_results = self.CountHuntResults(hunt_id)
total_cpu_seconds = 0
total_network_bytes_sent = 0
for f in self.ReadHuntFlows(hunt_id, 0, sys.maxsize):
total_cpu_seconds += (
f.cpu_time_used.user_cpu_time + f.cpu_time_used.system_cpu_time)
total_network_bytes_sent += f.network_bytes_sent
return db.HuntCounters(
num_clients=num_clients,
num_successful_clients=num_successful_clients,
num_failed_clients=num_failed_clients,
num_clients_with_results=num_clients_with_results,
num_crashed_clients=num_crashed_clients,
num_results=num_results,
total_cpu_seconds=total_cpu_seconds,
total_network_bytes_sent=total_network_bytes_sent) | Reads hunt counters. | Below is the instruction that describes the task:
### Input:
Reads hunt counters.
### Response:
def ReadHuntCounters(self, hunt_id):
"""Reads hunt counters."""
num_clients = self.CountHuntFlows(hunt_id)
num_successful_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY)
num_failed_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.FAILED_FLOWS_ONLY)
num_clients_with_results = len(
set(r[0].client_id
for r in self.flow_results.values()
if r and r[0].hunt_id == hunt_id))
num_crashed_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.CRASHED_FLOWS_ONLY)
num_results = self.CountHuntResults(hunt_id)
total_cpu_seconds = 0
total_network_bytes_sent = 0
for f in self.ReadHuntFlows(hunt_id, 0, sys.maxsize):
total_cpu_seconds += (
f.cpu_time_used.user_cpu_time + f.cpu_time_used.system_cpu_time)
total_network_bytes_sent += f.network_bytes_sent
return db.HuntCounters(
num_clients=num_clients,
num_successful_clients=num_successful_clients,
num_failed_clients=num_failed_clients,
num_clients_with_results=num_clients_with_results,
num_crashed_clients=num_crashed_clients,
num_results=num_results,
total_cpu_seconds=total_cpu_seconds,
total_network_bytes_sent=total_network_bytes_sent) |
def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g) | Pretty print list of items. | Below is the the instruction that describes the task:
### Input:
Pretty print list of items.
### Response:
def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g) |
def delete(cls, repo, *refs, **kwargs):
"""Delete the given remote references
:note:
kwargs are given for compatibility with the base class method as we
should not narrow the signature."""
repo.git.branch("-d", "-r", *refs)
# the official deletion method will ignore remote symbolic refs - these
# are generally ignored in the refs/ folder. We don't though
# and delete remainders manually
for ref in refs:
try:
os.remove(osp.join(repo.common_dir, ref.path))
except OSError:
pass
try:
os.remove(osp.join(repo.git_dir, ref.path))
except OSError:
pass | Delete the given remote references
:note:
kwargs are given for compatibility with the base class method as we
should not narrow the signature. | Below is the instruction that describes the task:
### Input:
Delete the given remote references
:note:
kwargs are given for compatibility with the base class method as we
should not narrow the signature.
### Response:
def delete(cls, repo, *refs, **kwargs):
"""Delete the given remote references
:note:
kwargs are given for compatibility with the base class method as we
should not narrow the signature."""
repo.git.branch("-d", "-r", *refs)
# the official deletion method will ignore remote symbolic refs - these
# are generally ignored in the refs/ folder. We don't though
# and delete remainders manually
for ref in refs:
try:
os.remove(osp.join(repo.common_dir, ref.path))
except OSError:
pass
try:
os.remove(osp.join(repo.git_dir, ref.path))
except OSError:
pass |
def move(self, fnames=None, directory=None):
"""Move files/directories"""
if fnames is None:
fnames = self.get_selected_filenames()
orig = fixpath(osp.dirname(fnames[0]))
while True:
self.redirect_stdio.emit(False)
if directory is None:
folder = getexistingdirectory(self, _("Select directory"),
orig)
else:
folder = directory
self.redirect_stdio.emit(True)
if folder:
folder = fixpath(folder)
if folder != orig:
break
else:
return
for fname in fnames:
basename = osp.basename(fname)
try:
misc.move_file(fname, osp.join(folder, basename))
except EnvironmentError as error:
QMessageBox.critical(self, _("Error"),
_("<b>Unable to move <i>%s</i></b>"
"<br><br>Error message:<br>%s"
) % (basename, to_text_string(error))) | Move files/directories | Below is the instruction that describes the task:
### Input:
Move files/directories
### Response:
def move(self, fnames=None, directory=None):
"""Move files/directories"""
if fnames is None:
fnames = self.get_selected_filenames()
orig = fixpath(osp.dirname(fnames[0]))
while True:
self.redirect_stdio.emit(False)
if directory is None:
folder = getexistingdirectory(self, _("Select directory"),
orig)
else:
folder = directory
self.redirect_stdio.emit(True)
if folder:
folder = fixpath(folder)
if folder != orig:
break
else:
return
for fname in fnames:
basename = osp.basename(fname)
try:
misc.move_file(fname, osp.join(folder, basename))
except EnvironmentError as error:
QMessageBox.critical(self, _("Error"),
_("<b>Unable to move <i>%s</i></b>"
"<br><br>Error message:<br>%s"
) % (basename, to_text_string(error))) |
def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=None):
"""
Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
strength_param = iap.handle_continuous_param(strength, "strength", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
strength_sample = strength_param.draw_sample(random_state=random_state_func)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[-1-strength_sample, 0-strength_sample, 0],
[0-strength_sample, 1, 0+strength_sample],
[0, 0+strength_sample, 1+strength_sample]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image. | Below is the instruction that describes the task:
### Input:
Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image.
### Response:
def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=None):
"""
Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
strength_param = iap.handle_continuous_param(strength, "strength", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
strength_sample = strength_param.draw_sample(random_state=random_state_func)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[-1-strength_sample, 0-strength_sample, 0],
[0-strength_sample, 1, 0+strength_sample],
[0, 0+strength_sample, 1+strength_sample]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) |
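A typical pipeline sketch, assuming imgaug is installed; the batch shape is arbitrary:

import numpy as np
import imgaug.augmenters as iaa

aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
images_embossed = aug.augment_images(images)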
def add_port_to_free_pool(self, port):
"""Add a new port to the free pool for allocation."""
if port < 1 or port > 65535:
raise ValueError(
'Port must be in the [1, 65535] range, not %d.' % port)
port_info = _PortInfo(port=port)
self._port_queue.append(port_info) | Add a new port to the free pool for allocation. | Below is the instruction that describes the task:
### Input:
Add a new port to the free pool for allocation.
### Response:
def add_port_to_free_pool(self, port):
"""Add a new port to the free pool for allocation."""
if port < 1 or port > 65535:
raise ValueError(
'Port must be in the [1, 65535] range, not %d.' % port)
port_info = _PortInfo(port=port)
self._port_queue.append(port_info) |
def parse_navigation_html_to_tree(html, id):
"""Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
"""
def xpath(x):
return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
try:
value = xpath('//*[@data-type="binding"]/@data-value')[0]
is_translucent = value == 'translucent'
except IndexError:
is_translucent = False
if is_translucent:
id = TRANSLUCENT_BINDER_ID
tree = {'id': id,
'title': xpath('//*[@data-type="document-title"]/text()')[0],
'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])]
}
return tree | Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value. | Below is the instruction that describes the task:
### Input:
Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
### Response:
def parse_navigation_html_to_tree(html, id):
"""Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
"""
def xpath(x):
return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
try:
value = xpath('//*[@data-type="binding"]/@data-value')[0]
is_translucent = value == 'translucent'
except IndexError:
is_translucent = False
if is_translucent:
id = TRANSLUCENT_BINDER_ID
tree = {'id': id,
'title': xpath('//*[@data-type="document-title"]/text()')[0],
'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])]
}
return tree |
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, (1, 0, 0))
>>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
True
"""
quaternion = numpy.zeros((4, ), dtype=numpy.float64)
quaternion[:3] = axis[:3]
qlen = vector_norm(quaternion)
if qlen > _EPS:
quaternion *= math.sin(angle/2.0) / qlen
quaternion[3] = math.cos(angle/2.0)
return quaternion | Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, (1, 0, 0))
>>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
True | Below is the instruction that describes the task:
### Input:
Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, (1, 0, 0))
>>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
True
### Response:
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, (1, 0, 0))
>>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
True
"""
quaternion = numpy.zeros((4, ), dtype=numpy.float64)
quaternion[:3] = axis[:3]
qlen = vector_norm(quaternion)
if qlen > _EPS:
quaternion *= math.sin(angle/2.0) / qlen
quaternion[3] = math.cos(angle/2.0)
return quaternion |
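A quick numeric check of the function above: a 90-degree rotation about the z-axis should give (0, 0, sin 45°, cos 45°), which is a unit quaternion:

import math
import numpy

q = quaternion_about_axis(math.pi / 2, (0, 0, 1))
assert numpy.allclose(q, [0, 0, math.sin(math.pi / 4), math.cos(math.pi / 4)])
assert numpy.isclose(numpy.linalg.norm(q), 1.0)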
def check_for_cores(self):
"""! @brief Init task: verify that at least one core was discovered."""
if not len(self.cores):
# Allow the user to override the exception to enable uses like chip bringup.
if self.session.options.get('allow_no_cores', False):
logging.error("No cores were discovered!")
else:
raise exceptions.DebugError("No cores were discovered!") | ! @brief Init task: verify that at least one core was discovered. | Below is the instruction that describes the task:
### Input:
! @brief Init task: verify that at least one core was discovered.
### Response:
def check_for_cores(self):
"""! @brief Init task: verify that at least one core was discovered."""
if not len(self.cores):
# Allow the user to override the exception to enable uses like chip bringup.
if self.session.options.get('allow_no_cores', False):
logging.error("No cores were discovered!")
else:
raise exceptions.DebugError("No cores were discovered!") |
def src_builder(self):
"""Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root).
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.dir.src_builder()
self.sbuilder = scb
return scb | Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root). | Below is the instruction that describes the task:
### Input:
Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root).
### Response:
def src_builder(self):
"""Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root).
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.dir.src_builder()
self.sbuilder = scb
return scb |
def random_seed(seed=42):
""" sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864
"""
old_state = random.getstate()
random.seed(seed)
try:
yield
finally:
random.setstate(old_state) | sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864 | Below is the instruction that describes the task:
### Input:
sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864
### Response:
def random_seed(seed=42):
""" sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864
"""
old_state = random.getstate()
random.seed(seed)
try:
yield
finally:
random.setstate(old_state) |
def get_instance(self, payload):
"""
Build an instance of ConferenceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.conference.ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
"""
return ConferenceInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of ConferenceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.conference.ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance | Below is the instruction that describes the task:
### Input:
Build an instance of ConferenceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.conference.ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of ConferenceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.conference.ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
"""
return ConferenceInstance(self._version, payload, account_sid=self._solution['account_sid'], ) |
def _sample_3d(self, n, seed=None):
"""Specialized inversion sampler for 3D."""
seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d')
u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype)
# TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could
# be bisected for bounded sampling runtime (i.e. not rejection sampling).
# [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/
# The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa
# We must protect against both kappa and z being zero.
safe_conc = tf.where(self.concentration > 0,
self.concentration,
tf.ones_like(self.concentration))
safe_z = tf.where(z > 0, z, tf.ones_like(z))
safe_u = 1 + tf.reduce_logsumexp(
input_tensor=[
tf.math.log(safe_z),
tf.math.log1p(-safe_z) - 2 * safe_conc
],
axis=0) / safe_conc
# Limit of the above expression as kappa->0 is 2*z-1
u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u,
2 * z - 1)
# Limit of the expression as z->0 is -1.
u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u)
if not self._allow_nan_stats:
u = tf.debugging.check_numerics(u, 'u in _sample_3d')
return u[..., tf.newaxis] | Specialized inversion sampler for 3D. | Below is the instruction that describes the task:
### Input:
Specialized inversion sampler for 3D.
### Response:
def _sample_3d(self, n, seed=None):
"""Specialized inversion sampler for 3D."""
seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d')
u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype)
# TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could
# be bisected for bounded sampling runtime (i.e. not rejection sampling).
# [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/
# The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa
# We must protect against both kappa and z being zero.
safe_conc = tf.where(self.concentration > 0,
self.concentration,
tf.ones_like(self.concentration))
safe_z = tf.where(z > 0, z, tf.ones_like(z))
safe_u = 1 + tf.reduce_logsumexp(
input_tensor=[
tf.math.log(safe_z),
tf.math.log1p(-safe_z) - 2 * safe_conc
],
axis=0) / safe_conc
# Limit of the above expression as kappa->0 is 2*z-1
u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u,
2 * z - 1)
# Limit of the expression as z->0 is -1.
u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u)
if not self._allow_nan_stats:
u = tf.debugging.check_numerics(u, 'u in _sample_3d')
return u[..., tf.newaxis] |
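The inversion formula from the comments can be sketched in plain numpy, without the zero-guards used above; kappa and the draw count are illustrative:

import numpy as np

kappa = 2.0                                  # concentration
z = np.random.uniform(1e-9, 1.0, size=1000)  # uniform draws, zero excluded
# u = 1 + log(z + (1 - z) * exp(-2*kappa)) / kappa, computed in log-space:
u = 1 + np.logaddexp(np.log(z), np.log1p(-z) - 2 * kappa) / kappa
assert np.all((-1 <= u) & (u <= 1))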
def importPreflibFile(self, fileName):
"""
Imports a preflib format file that contains all the information of a Profile. This function
will completely override all members of the current Profile object. Currently, we assume
that in an election where incomplete orderings are allowed, if a voter ranks only one
candidate, then the voter did not prefer any candidates over another. This may lead to some
discrepancies when importing and exporting a .toi preflib file or a .soi preflib file.
:ivar str fileName: The name of the input file to be imported.
"""
# Use the functionality found in io to read the file.
elecFileObj = open(fileName, 'r')
self.candMap, rankMaps, wmgMapsCounts, self.numVoters = prefpy_io.read_election_file(elecFileObj)
elecFileObj.close()
self.numCands = len(self.candMap.keys())
# Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap to create a
# Preference object.
self.preferences = []
for i in range(0, len(rankMaps)):
wmgMap = self.genWmgMapFromRankMap(rankMaps[i])
self.preferences.append(Preference(wmgMap, wmgMapsCounts[i])) | Imports a preflib format file that contains all the information of a Profile. This function
will completely override all members of the current Profile object. Currently, we assume
that in an election where incomplete orderings are allowed, if a voter ranks only one
candidate, then the voter did not prefer any candidates over another. This may lead to some
discrepancies when importing and exporting a .toi preflib file or a .soi preflib file.
:ivar str fileName: The name of the input file to be imported. | Below is the instruction that describes the task:
### Input:
Imports a preflib format file that contains all the information of a Profile. This function
will completely override all members of the current Profile object. Currently, we assume
that in an election where incomplete orderings are allowed, if a voter ranks only one
candidate, then the voter did not prefer any candidates over another. This may lead to some
discrepancies when importing and exporting a .toi preflib file or a .soi preflib file.
:ivar str fileName: The name of the input file to be imported.
### Response:
def importPreflibFile(self, fileName):
"""
Imports a preflib format file that contains all the information of a Profile. This function
will completely override all members of the current Profile object. Currently, we assume
that in an election where incomplete orderings are allowed, if a voter ranks only one
candidate, then the voter did not prefer any candidates over another. This may lead to some
discrepancies when importing and exporting a .toi preflib file or a .soi preflib file.
:ivar str fileName: The name of the input file to be imported.
"""
# Use the functionality found in io to read the file.
elecFileObj = open(fileName, 'r')
self.candMap, rankMaps, wmgMapsCounts, self.numVoters = prefpy_io.read_election_file(elecFileObj)
elecFileObj.close()
self.numCands = len(self.candMap.keys())
# Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap to create a
# Preference object.
self.preferences = []
for i in range(0, len(rankMaps)):
wmgMap = self.genWmgMapFromRankMap(rankMaps[i])
self.preferences.append(Preference(wmgMap, wmgMapsCounts[i])) |
def randomTraversal(sensations, numTraversals):
"""
Given a list of sensations, return the SDRs that would be obtained by
numTraversals random traversals of that set of sensations.
Each sensation is a dict mapping cortical column index to a pair of SDR's
(one location and one feature).
"""
newSensations = []
for _ in range(numTraversals):
s = copy.deepcopy(sensations)
random.shuffle(s)
newSensations += s
return newSensations | Given a list of sensations, return the SDRs that would be obtained by
numTraversals random traversals of that set of sensations.
Each sensation is a dict mapping cortical column index to a pair of SDR's
(one location and one feature). | Below is the instruction that describes the task:
### Input:
Given a list of sensations, return the SDRs that would be obtained by
numTraversals random traversals of that set of sensations.
Each sensation is a dict mapping cortical column index to a pair of SDR's
(one location and one feature).
### Response:
def randomTraversal(sensations, numTraversals):
"""
Given a list of sensations, return the SDRs that would be obtained by
numTraversals random traversals of that set of sensations.
Each sensation is a dict mapping cortical column index to a pair of SDR's
(one location and one feature).
"""
newSensations = []
for _ in range(numTraversals):
s = copy.deepcopy(sensations)
random.shuffle(s)
newSensations += s
return newSensations |
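A brief usage sketch with hypothetical two-column sensations; it assumes randomTraversal above is in scope together with the copy and random imports its body needs:
import copy, random

sensations = [{0: ([1, 2], [3]), 1: ([4], [5, 6])},
              {0: ([7], [8]), 1: ([9], [10])}]
shuffled = randomTraversal(sensations, numTraversals=3)
print(len(shuffled))  # 6: the two sensations repeated in three shuffled passes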
def actually_start_agent(self, descriptor, **kwargs):
"""
This method will be run only on the master agency.
"""
factory = IAgentFactory(
applications.lookup_agent(descriptor.type_name))
if factory.standalone:
return self.start_standalone_agent(descriptor, factory, **kwargs)
else:
return self.start_agent_locally(descriptor, **kwargs) | This method will be run only on the master agency. | Below is the the instruction that describes the task:
### Input:
This method will be run only on the master agency.
### Response:
def actually_start_agent(self, descriptor, **kwargs):
"""
This method will be run only on the master agency.
"""
factory = IAgentFactory(
applications.lookup_agent(descriptor.type_name))
if factory.standalone:
return self.start_standalone_agent(descriptor, factory, **kwargs)
else:
return self.start_agent_locally(descriptor, **kwargs) |
def hash_trees(self):
"hash ladderized tree topologies"
observed = {}
for idx, tree in enumerate(self.treelist):
nwk = tree.write(tree_format=9)
hashed = md5(nwk.encode("utf-8")).hexdigest()
if hashed not in observed:
observed[hashed] = idx
self.treedict[idx] = 1
else:
idx = observed[hashed]
self.treedict[idx] += 1 | hash ladderized tree topologies | Below is the the instruction that describes the task:
### Input:
hash ladderized tree topologies
### Response:
def hash_trees(self):
"hash ladderized tree topologies"
observed = {}
for idx, tree in enumerate(self.treelist):
nwk = tree.write(tree_format=9)
hashed = md5(nwk.encode("utf-8")).hexdigest()
if hashed not in observed:
observed[hashed] = idx
self.treedict[idx] = 1
else:
idx = observed[hashed]
self.treedict[idx] += 1 |
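The hashing idea in isolation as a self-contained sketch: identical newick strings collapse onto one md5 key, so counts accumulate per unique topology.
from hashlib import md5

newicks = ["((a,b),c);", "((a,b),c);", "(a,(b,c));"]
counts, seen = {}, {}
for idx, nwk in enumerate(newicks):
    h = md5(nwk.encode("utf-8")).hexdigest()
    if h not in seen:
        seen[h] = idx  # first index observed for this topology
        counts[idx] = 1
    else:
        counts[seen[h]] += 1
print(counts)  # {0: 2, 2: 1}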
def end_segment(self, end_time=None):
"""
End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
"""
entity = self.get_trace_entity()
if not entity:
log.warning("No segment to end")
return
if self._is_subsegment(entity):
entity.parent_segment.close(end_time)
else:
entity.close(end_time) | End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used. | Below is the instruction that describes the task:
### Input:
End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
### Response:
def end_segment(self, end_time=None):
"""
End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
"""
entity = self.get_trace_entity()
if not entity:
log.warning("No segment to end")
return
if self._is_subsegment(entity):
entity.parent_segment.close(end_time)
else:
entity.close(end_time) |
def get_students(
self,
gradebook_id='',
simple=False,
section_name='',
include_photo=False,
include_grade_info=False,
include_grade_history=False,
include_makeup_grades=False
):
"""Get students for a gradebook.
Get a list of students for a given gradebook,
specified by a gradebook id. Does not include grade data.
Args:
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
simple (bool):
if ``True``, just return dictionary with keys ``email``,
``name``, ``section``, default = ``False``
section_name (str): section name
include_photo (bool): include student photo, default= ``False``
include_grade_info (bool):
include student's grade info, default= ``False``
include_grade_history (bool):
include student's grade history, default= ``False``
include_makeup_grades (bool):
include student's makeup grades, default= ``False``
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: list of student dictionaries
.. code-block:: python
[{
u'accountEmail': u'[email protected]',
u'displayName': u'Molly Parker',
u'photoUrl': None,
u'middleName': None,
u'section': u'Unassigned',
u'sectionId': 1293925,
u'editable': False,
u'overallGradeInformation': None,
u'studentId': 1145,
u'studentAssignmentInfo': None,
u'sortableName': u'Parker, Molly',
u'surname': u'Parker',
u'givenName': u'Molly',
u'nickName': u'Molly',
u'email': u'[email protected]'
},]
"""
# These are parameters required for the remote API call, so
# there aren't too many arguments, or too many variables
# pylint: disable=too-many-arguments,too-many-locals
# Set params by arguments
params = dict(
includePhoto=json.dumps(include_photo),
includeGradeInfo=json.dumps(include_grade_info),
includeGradeHistory=json.dumps(include_grade_history),
includeMakeupGrades=json.dumps(include_makeup_grades),
)
url = 'students/{gradebookId}'
if section_name:
group_id, _ = self.get_section_by_name(section_name)
if group_id is None:
failure_message = (
'in get_students -- Error: '
'No such section %s' % section_name
)
log.critical(failure_message)
raise PyLmodNoSuchSection(failure_message)
url += '/section/{0}'.format(group_id)
student_data = self.get(
url.format(
gradebookId=gradebook_id or self.gradebook_id
),
params=params,
)
if simple:
# just return dict with keys email, name, section
student_map = dict(
accountEmail='email',
displayName='name',
section='section'
)
def remap(students):
"""Convert mit.edu domain to upper-case for student emails.
The mit.edu domain for user email must be upper-case,
i.e. MIT.EDU.
Args:
students (list): list of students
Returns:
dict: dictionary of updated student email domains
"""
newx = dict((student_map[k], students[k]) for k in student_map)
# match certs
newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU')
return newx
return [remap(x) for x in student_data['data']]
return student_data['data'] | Get students for a gradebook.
Get a list of students for a given gradebook,
specified by a gradebook id. Does not include grade data.
Args:
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
simple (bool):
if ``True``, just return dictionary with keys ``email``,
``name``, ``section``, default = ``False``
section_name (str): section name
include_photo (bool): include student photo, default= ``False``
include_grade_info (bool):
include student's grade info, default= ``False``
include_grade_history (bool):
include student's grade history, default= ``False``
include_makeup_grades (bool):
include student's makeup grades, default= ``False``
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: list of student dictionaries
.. code-block:: python
[{
u'accountEmail': u'[email protected]',
u'displayName': u'Molly Parker',
u'photoUrl': None,
u'middleName': None,
u'section': u'Unassigned',
u'sectionId': 1293925,
u'editable': False,
u'overallGradeInformation': None,
u'studentId': 1145,
u'studentAssignmentInfo': None,
u'sortableName': u'Parker, Molly',
u'surname': u'Parker',
u'givenName': u'Molly',
u'nickName': u'Molly',
u'email': u'[email protected]'
},] | Below is the instruction that describes the task:
### Input:
Get students for a gradebook.
Get a list of students for a given gradebook,
specified by a gradebook id. Does not include grade data.
Args:
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
simple (bool):
if ``True``, just return dictionary with keys ``email``,
``name``, ``section``, default = ``False``
section_name (str): section name
include_photo (bool): include student photo, default= ``False``
include_grade_info (bool):
include student's grade info, default= ``False``
include_grade_history (bool):
include student's grade history, default= ``False``
include_makeup_grades (bool):
include student's makeup grades, default= ``False``
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: list of student dictionaries
.. code-block:: python
[{
u'accountEmail': u'[email protected]',
u'displayName': u'Molly Parker',
u'photoUrl': None,
u'middleName': None,
u'section': u'Unassigned',
u'sectionId': 1293925,
u'editable': False,
u'overallGradeInformation': None,
u'studentId': 1145,
u'studentAssignmentInfo': None,
u'sortableName': u'Parker, Molly',
u'surname': u'Parker',
u'givenName': u'Molly',
u'nickName': u'Molly',
u'email': u'[email protected]'
},]
### Response:
def get_students(
self,
gradebook_id='',
simple=False,
section_name='',
include_photo=False,
include_grade_info=False,
include_grade_history=False,
include_makeup_grades=False
):
"""Get students for a gradebook.
Get a list of students for a given gradebook,
specified by a gradebook id. Does not include grade data.
Args:
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
simple (bool):
if ``True``, just return dictionary with keys ``email``,
``name``, ``section``, default = ``False``
section_name (str): section name
include_photo (bool): include student photo, default= ``False``
include_grade_info (bool):
include student's grade info, default= ``False``
include_grade_history (bool):
include student's grade history, default= ``False``
include_makeup_grades (bool):
include student's makeup grades, default= ``False``
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: list of student dictionaries
.. code-block:: python
[{
u'accountEmail': u'[email protected]',
u'displayName': u'Molly Parker',
u'photoUrl': None,
u'middleName': None,
u'section': u'Unassigned',
u'sectionId': 1293925,
u'editable': False,
u'overallGradeInformation': None,
u'studentId': 1145,
u'studentAssignmentInfo': None,
u'sortableName': u'Parker, Molly',
u'surname': u'Parker',
u'givenName': u'Molly',
u'nickName': u'Molly',
u'email': u'[email protected]'
},]
"""
# These are parameters required for the remote API call, so
# there aren't too many arguments, or too many variables
# pylint: disable=too-many-arguments,too-many-locals
# Set params by arguments
params = dict(
includePhoto=json.dumps(include_photo),
includeGradeInfo=json.dumps(include_grade_info),
includeGradeHistory=json.dumps(include_grade_history),
includeMakeupGrades=json.dumps(include_makeup_grades),
)
url = 'students/{gradebookId}'
if section_name:
group_id, _ = self.get_section_by_name(section_name)
if group_id is None:
failure_message = (
'in get_students -- Error: '
'No such section %s' % section_name
)
log.critical(failure_message)
raise PyLmodNoSuchSection(failure_message)
url += '/section/{0}'.format(group_id)
student_data = self.get(
url.format(
gradebookId=gradebook_id or self.gradebook_id
),
params=params,
)
if simple:
# just return dict with keys email, name, section
student_map = dict(
accountEmail='email',
displayName='name',
section='section'
)
def remap(students):
"""Convert mit.edu domain to upper-case for student emails.
The mit.edu domain for user email must be upper-case,
i.e. MIT.EDU.
Args:
students (list): list of students
Returns:
dict: dictionary of updated student email domains
"""
newx = dict((student_map[k], students[k]) for k in student_map)
# match certs
newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU')
return newx
return [remap(x) for x in student_data['data']]
return student_data['data'] |
def _startGenresNode(self, name, attrs):
"""Process the start of a node under xtvd/genres"""
if name == 'programGenre':
self._programId = attrs.get('program')
elif name == 'genre':
self._genre = None
self._relevance = None | Process the start of a node under xtvd/genres | Below is the instruction that describes the task:
### Input:
Process the start of a node under xtvd/genres
### Response:
def _startGenresNode(self, name, attrs):
"""Process the start of a node under xtvd/genres"""
if name == 'programGenre':
self._programId = attrs.get('program')
elif name == 'genre':
self._genre = None
self._relevance = None |
def _get_inline_fragment(ast):
"""Return the inline fragment at the current AST node, or None if no fragment exists."""
if not ast.selection_set:
# There is nothing selected here, so no fragment.
return None
fragments = [
ast_node
for ast_node in ast.selection_set.selections
if isinstance(ast_node, InlineFragment)
]
if not fragments:
return None
if len(fragments) > 1:
raise GraphQLCompilationError(u'Cannot compile GraphQL with more than one fragment in '
u'a given selection set.')
return fragments[0] | Return the inline fragment at the current AST node, or None if no fragment exists. | Below is the instruction that describes the task:
### Input:
Return the inline fragment at the current AST node, or None if no fragment exists.
### Response:
def _get_inline_fragment(ast):
"""Return the inline fragment at the current AST node, or None if no fragment exists."""
if not ast.selection_set:
# There is nothing selected here, so no fragment.
return None
fragments = [
ast_node
for ast_node in ast.selection_set.selections
if isinstance(ast_node, InlineFragment)
]
if not fragments:
return None
if len(fragments) > 1:
raise GraphQLCompilationError(u'Cannot compile GraphQL with more than one fragment in '
u'a given selection set.')
return fragments[0] |
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while self.cookedq.tell() == 0 and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq.getvalue()
self.cookedq.seek(0)
self.cookedq.truncate()
return buf | Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available. | Below is the instruction that describes the task:
### Input:
Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
### Response:
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while self.cookedq.tell() == 0 and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq.getvalue()
self.cookedq.seek(0)
self.cookedq.truncate()
return buf |
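The cooked-queue drain pattern in isolation, a sketch with io.BytesIO standing in for the instance buffer: getvalue() grabs everything buffered so far, then seek(0) plus truncate() empties the buffer for reuse.
from io import BytesIO

cookedq = BytesIO()
cookedq.write(b"hello ")
cookedq.write(b"world")
buf = cookedq.getvalue()
cookedq.seek(0)
cookedq.truncate()
print(buf, cookedq.tell())  # b'hello world' 0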
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
"""
self._submission_queue.put(self._submit.Message(body, future)) | Enqueue a problem for submission to the server.
This method is thread safe. | Below is the instruction that describes the task:
### Input:
Enqueue a problem for submission to the server.
This method is thread safe.
### Response:
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
"""
self._submission_queue.put(self._submit.Message(body, future)) |
def get_creation_datetime(filepath):
"""
Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : datetime.datetime or None
"""
if platform.system() == 'Windows':
return datetime.fromtimestamp(os.path.getctime(filepath))
else:
stat = os.stat(filepath)
try:
return datetime.fromtimestamp(stat.st_birthtime)
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return None | Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : datetime.datetime or None | Below is the instruction that describes the task:
### Input:
Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : datetime.datetime or None
### Response:
def get_creation_datetime(filepath):
"""
Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : datetime.datetime or None
"""
if platform.system() == 'Windows':
return datetime.fromtimestamp(os.path.getctime(filepath))
else:
stat = os.stat(filepath)
try:
return datetime.fromtimestamp(stat.st_birthtime)
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we return None instead.
return None |
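A usage sketch, assuming the function above and its platform/os/datetime imports are in scope: on Windows this prints a datetime, while on most Linux filesystems st_birthtime is unavailable and None comes back.
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    path = tmp.name
print(get_creation_datetime(path))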
def update_counts(self, current):
"""
updates counts for the class instance based on the current dictionary
counts
args:
-----
current: current dictionary counts
"""
for item in current:
try:
self.counts[item] += 1
except KeyError:
self.counts[item] = 1 | updates counts for the class instance based on the current dictionary
counts
args:
-----
current: current dictionary counts | Below is the instruction that describes the task:
### Input:
updates counts for the class instance based on the current dictionary
counts
args:
-----
current: current dictionary counts
### Response:
def update_counts(self, current):
"""
updates counts for the class instance based on the current dictionary
counts
args:
-----
current: current dictionary counts
"""
for item in current:
try:
self.counts[item] += 1
except KeyError:
self.counts[item] = 1 |
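For comparison only, not part of the original class: the same try/except tally is what collections.Counter.update performs.
from collections import Counter

counts = Counter()
for current in (["a", "b"], ["b", "c"]):
    counts.update(current)  # increments existing keys, creates missing ones
print(counts)  # Counter({'b': 2, 'a': 1, 'c': 1})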
def create_mixin(self):
"""
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
"""
_builder = self
class CustomModelMixin(object):
@cached_property
def _content_type(self):
return ContentType.objects.get_for_model(self)
@classmethod
def get_model_custom_fields(cls):
""" Return a list of custom fields for this model, callable at model level """
return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))
def get_custom_fields(self):
""" Return a list of custom fields for this model """
return _builder.fields_model_class.objects.filter(content_type=self._content_type)
def get_custom_value(self, field):
""" Get a value for a specified custom field """
return _builder.values_model_class.objects.get(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
def set_custom_value(self, field, value):
""" Set a value for a specified custom field """
custom_value, created = \
_builder.values_model_class.objects.get_or_create(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
custom_value.value = value
custom_value.full_clean()
custom_value.save()
return custom_value
#def __getattr__(self, name):
# """ Get a value for a specified custom field """
# try:
# obj = _builder.values_model_class.objects.get(custom_field__name=name,
# content_type=self._content_type,
# object_id=self.pk)
# return obj.value
# except ObjectDoesNotExist:
# pass
# return super(CustomModelMixin, self).__getattr__(name)
return CustomModelMixin | This will create the custom Model Mixin to attach to your custom field
enabled model.
:return: | Below is the instruction that describes the task:
### Input:
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
### Response:
def create_mixin(self):
"""
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
"""
_builder = self
class CustomModelMixin(object):
@cached_property
def _content_type(self):
return ContentType.objects.get_for_model(self)
@classmethod
def get_model_custom_fields(cls):
""" Return a list of custom fields for this model, callable at model level """
return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))
def get_custom_fields(self):
""" Return a list of custom fields for this model """
return _builder.fields_model_class.objects.filter(content_type=self._content_type)
def get_custom_value(self, field):
""" Get a value for a specified custom field """
return _builder.values_model_class.objects.get(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
def set_custom_value(self, field, value):
""" Set a value for a specified custom field """
custom_value, created = \
_builder.values_model_class.objects.get_or_create(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
custom_value.value = value
custom_value.full_clean()
custom_value.save()
return custom_value
#def __getattr__(self, name):
# """ Get a value for a specified custom field """
# try:
# obj = _builder.values_model_class.objects.get(custom_field__name=name,
# content_type=self._content_type,
# object_id=self.pk)
# return obj.value
# except ObjectDoesNotExist:
# pass
# return super(CustomModelMixin, self).__getattr__(name)
return CustomModelMixin |
def __dump_docker_compose(path, content, already_existed):
'''
Dumps
:param path:
:param content: the not-yet dumped content
:return:
'''
try:
dumped = yaml.safe_dump(content, indent=2, default_flow_style=False)
return __write_docker_compose(path, dumped, already_existed)
except TypeError as t_err:
msg = 'Could not dump {0} {1}'.format(content, t_err)
return __standardize_result(False, msg,
None, None) | Dumps
:param path:
:param content: the not-yet dumped content
:return: | Below is the instruction that describes the task:
### Input:
Dumps
:param path:
:param content: the not-yet dumped content
:return:
### Response:
def __dump_docker_compose(path, content, already_existed):
'''
Dumps
:param path:
:param content: the not-yet dumped content
:return:
'''
try:
dumped = yaml.safe_dump(content, indent=2, default_flow_style=False)
return __write_docker_compose(path, dumped, already_existed)
except TypeError as t_err:
msg = 'Could not dump {0} {1}'.format(content, t_err)
return __standardize_result(False, msg,
None, None) |
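What the yaml.safe_dump call above produces for a small compose-like dict, assuming PyYAML is installed (block style, two-space indent):
import yaml

content = {"services": {"web": {"image": "nginx", "ports": ["80:80"]}}}
print(yaml.safe_dump(content, indent=2, default_flow_style=False))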
def _from_dict(cls, _dict):
"""Initialize a Log object from a json dictionary."""
args = {}
if 'request' in _dict:
args['request'] = MessageRequest._from_dict(_dict.get('request'))
else:
raise ValueError(
'Required property \'request\' not present in Log JSON')
if 'response' in _dict:
args['response'] = MessageResponse._from_dict(_dict.get('response'))
else:
raise ValueError(
'Required property \'response\' not present in Log JSON')
if 'log_id' in _dict:
args['log_id'] = _dict.get('log_id')
else:
raise ValueError(
'Required property \'log_id\' not present in Log JSON')
if 'request_timestamp' in _dict:
args['request_timestamp'] = _dict.get('request_timestamp')
else:
raise ValueError(
'Required property \'request_timestamp\' not present in Log JSON'
)
if 'response_timestamp' in _dict:
args['response_timestamp'] = _dict.get('response_timestamp')
else:
raise ValueError(
'Required property \'response_timestamp\' not present in Log JSON'
)
if 'workspace_id' in _dict:
args['workspace_id'] = _dict.get('workspace_id')
else:
raise ValueError(
'Required property \'workspace_id\' not present in Log JSON')
if 'language' in _dict:
args['language'] = _dict.get('language')
else:
raise ValueError(
'Required property \'language\' not present in Log JSON')
return cls(**args) | Initialize a Log object from a json dictionary. | Below is the instruction that describes the task:
### Input:
Initialize a Log object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
"""Initialize a Log object from a json dictionary."""
args = {}
if 'request' in _dict:
args['request'] = MessageRequest._from_dict(_dict.get('request'))
else:
raise ValueError(
'Required property \'request\' not present in Log JSON')
if 'response' in _dict:
args['response'] = MessageResponse._from_dict(_dict.get('response'))
else:
raise ValueError(
'Required property \'response\' not present in Log JSON')
if 'log_id' in _dict:
args['log_id'] = _dict.get('log_id')
else:
raise ValueError(
'Required property \'log_id\' not present in Log JSON')
if 'request_timestamp' in _dict:
args['request_timestamp'] = _dict.get('request_timestamp')
else:
raise ValueError(
'Required property \'request_timestamp\' not present in Log JSON'
)
if 'response_timestamp' in _dict:
args['response_timestamp'] = _dict.get('response_timestamp')
else:
raise ValueError(
'Required property \'response_timestamp\' not present in Log JSON'
)
if 'workspace_id' in _dict:
args['workspace_id'] = _dict.get('workspace_id')
else:
raise ValueError(
'Required property \'workspace_id\' not present in Log JSON')
if 'language' in _dict:
args['language'] = _dict.get('language')
else:
raise ValueError(
'Required property \'language\' not present in Log JSON')
return cls(**args) |
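The validation pattern above reduced to its core, as a generic sketch: each required key either populates the constructor kwargs or raises immediately.
def require(_dict, keys, json_name):
    args = {}
    for key in keys:
        if key not in _dict:
            raise ValueError(
                "Required property '%s' not present in %s JSON" % (key, json_name))
        args[key] = _dict[key]
    return args

print(require({"log_id": "abc", "language": "en"}, ["log_id", "language"], "Log"))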
def _report_container_count(self, containers_by_id):
"""Report container count per state"""
m_func = FUNC_MAP[GAUGE][self.use_histogram]
per_state_count = defaultdict(int)
filterlambda = lambda ctr: not self._is_container_excluded(ctr)
containers = list(filter(filterlambda, containers_by_id.values()))
for ctr in containers:
per_state_count[ctr.get('State', '')] += 1
for state in per_state_count:
if state:
m_func(self, 'docker.container.count', per_state_count[state], tags=['container_state:%s' % state.lower()]) | Report container count per state | Below is the instruction that describes the task:
### Input:
Report container count per state
### Response:
def _report_container_count(self, containers_by_id):
"""Report container count per state"""
m_func = FUNC_MAP[GAUGE][self.use_histogram]
per_state_count = defaultdict(int)
filterlambda = lambda ctr: not self._is_container_excluded(ctr)
containers = list(filter(filterlambda, containers_by_id.values()))
for ctr in containers:
per_state_count[ctr.get('State', '')] += 1
for state in per_state_count:
if state:
m_func(self, 'docker.container.count', per_state_count[state], tags=['container_state:%s' % state.lower()]) |
def create(self, order_increment_id,
items_qty, comment=None, email=True, include_comment=False):
"""
Create new shipment for order
:param order_increment_id: Order Increment ID
:type order_increment_id: str
:param items_qty: items qty to ship
:type items_qty: associative array (order_item_id ⇒ qty) as dict
:param comment: Shipment Comment
:type comment: str
:param email: send e-mail flag (optional)
:type email: bool
:param include_comment: include comment in e-mail flag (optional)
:type include_comment: bool
"""
if comment is None:
comment = ''
return self.call(
'sales_order_shipment.create', [
order_increment_id, items_qty, comment, email, include_comment
]
) | Create new shipment for order
:param order_increment_id: Order Increment ID
:type order_increment_id: str
:param items_qty: items qty to ship
:type items_qty: associative array (order_item_id ⇒ qty) as dict
:param comment: Shipment Comment
:type comment: str
:param email: send e-mail flag (optional)
:type email: bool
:param include_comment: include comment in e-mail flag (optional)
:type include_comment: bool | Below is the instruction that describes the task:
### Input:
Create new shipment for order
:param order_increment_id: Order Increment ID
:type order_increment_id: str
:param items_qty: items qty to ship
:type items_qty: associative array (order_item_id ⇒ qty) as dict
:param comment: Shipment Comment
:type comment: str
:param email: send e-mail flag (optional)
:type email: bool
:param include_comment: include comment in e-mail flag (optional)
:type include_comment: bool
### Response:
def create(self, order_increment_id,
items_qty, comment=None, email=True, include_comment=False):
"""
Create new shipment for order
:param order_increment_id: Order Increment ID
:type order_increment_id: str
:param items_qty: items qty to ship
:type items_qty: associative array (order_item_id ⇒ qty) as dict
:param comment: Shipment Comment
:type comment: str
:param email: send e-mail flag (optional)
:type email: bool
:param include_comment: include comment in e-mail flag (optional)
:type include_comment: bool
"""
if comment is None:
comment = ''
return self.call(
'sales_order_shipment.create', [
order_increment_id, items_qty, comment, email, include_comment
]
) |
def _find_by_id(self, id):
"""
Expected response:
{
"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
"Created": "2013-05-07T14:51:42.041847+02:00",
"Path": "date",
"Args": [],
"Config": {
"Hostname": "4fa6e0f0c678",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"PortSpecs": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": null,
"Cmd": [
"date"
],
"Dns": null,
"Image": "base",
"Volumes": {},
"VolumesFrom": "",
"WorkingDir":""
},
"State": {
"Running": false,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-05-07T14:51:42.087658+02:01360",
"Ghost": false
},
"Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"NetworkSettings": {
"IpAddress": "",
"IpPrefixLen": 0,
"Gateway": "",
"Bridge": "",
"PortMapping": null
},
"SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
"ResolvConfPath": "/etc/resolv.conf",
"Volumes": {},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LxcConf": [],
"Privileged": false,
"PortBindings": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "49153"
}
]
},
"Links": ["/name:alias"],
"PublishAllPorts": false,
"CapAdd: ["NET_ADMIN"],
"CapDrop: ["MKNOD"]
}
}
"""
if not isinstance(id, six.string_types):
raise TypeError('must supply a string as the id')
# TODO: We should probably catch container not found error and return out own errors.
response = normalize_keys(self.client.inspect_container(id))
# TODO: normalize response to change - to _
self.id = response['id']
self.name = response['name'].replace('/', '')
self.image = response['image']
# come back and figure the timezone stuff out later.
self.created_at = dateutil.parser.parse(response['created'], ignoretz=True)
self.config = ContainerConfig(response['config'])
self.host_config = HostConfig(response['host_config'])
if self._transcribe:
self.start_transcribing() | Expected response:
{
"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
"Created": "2013-05-07T14:51:42.041847+02:00",
"Path": "date",
"Args": [],
"Config": {
"Hostname": "4fa6e0f0c678",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"PortSpecs": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": null,
"Cmd": [
"date"
],
"Dns": null,
"Image": "base",
"Volumes": {},
"VolumesFrom": "",
"WorkingDir":""
},
"State": {
"Running": false,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-05-07T14:51:42.087658+02:01360",
"Ghost": false
},
"Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"NetworkSettings": {
"IpAddress": "",
"IpPrefixLen": 0,
"Gateway": "",
"Bridge": "",
"PortMapping": null
},
"SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
"ResolvConfPath": "/etc/resolv.conf",
"Volumes": {},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LxcConf": [],
"Privileged": false,
"PortBindings": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "49153"
}
]
},
"Links": ["/name:alias"],
"PublishAllPorts": false,
"CapAdd: ["NET_ADMIN"],
"CapDrop: ["MKNOD"]
}
} | Below is the instruction that describes the task:
### Input:
Expected response:
{
"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
"Created": "2013-05-07T14:51:42.041847+02:00",
"Path": "date",
"Args": [],
"Config": {
"Hostname": "4fa6e0f0c678",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"PortSpecs": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": null,
"Cmd": [
"date"
],
"Dns": null,
"Image": "base",
"Volumes": {},
"VolumesFrom": "",
"WorkingDir":""
},
"State": {
"Running": false,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-05-07T14:51:42.087658+02:01360",
"Ghost": false
},
"Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"NetworkSettings": {
"IpAddress": "",
"IpPrefixLen": 0,
"Gateway": "",
"Bridge": "",
"PortMapping": null
},
"SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
"ResolvConfPath": "/etc/resolv.conf",
"Volumes": {},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LxcConf": [],
"Privileged": false,
"PortBindings": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "49153"
}
]
},
"Links": ["/name:alias"],
"PublishAllPorts": false,
"CapAdd: ["NET_ADMIN"],
"CapDrop: ["MKNOD"]
}
}
### Response:
def _find_by_id(self, id):
"""
Expected response:
{
"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
"Created": "2013-05-07T14:51:42.041847+02:00",
"Path": "date",
"Args": [],
"Config": {
"Hostname": "4fa6e0f0c678",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"PortSpecs": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": null,
"Cmd": [
"date"
],
"Dns": null,
"Image": "base",
"Volumes": {},
"VolumesFrom": "",
"WorkingDir":""
},
"State": {
"Running": false,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-05-07T14:51:42.087658+02:01360",
"Ghost": false
},
"Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"NetworkSettings": {
"IpAddress": "",
"IpPrefixLen": 0,
"Gateway": "",
"Bridge": "",
"PortMapping": null
},
"SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
"ResolvConfPath": "/etc/resolv.conf",
"Volumes": {},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LxcConf": [],
"Privileged": false,
"PortBindings": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "49153"
}
]
},
"Links": ["/name:alias"],
"PublishAllPorts": false,
"CapAdd: ["NET_ADMIN"],
"CapDrop: ["MKNOD"]
}
}
"""
if not isinstance(id, six.string_types):
raise TypeError('must supply a string as the id')
# TODO: We should probably catch container not found error and return out own errors.
response = normalize_keys(self.client.inspect_container(id))
# TODO: normalize response to change - to _
self.id = response['id']
self.name = response['name'].replace('/', '')
self.image = response['image']
# come back and figure the timezone stuff out later.
self.created_at = dateutil.parser.parse(response['created'], ignoretz=True)
self.config = ContainerConfig(response['config'])
self.host_config = HostConfig(response['host_config'])
if self._transcribe:
self.start_transcribing() |
def on_get(resc, req, resp, rid):
""" Find the model by id & serialize it back """
signals.pre_req.send(resc.model)
signals.pre_req_find.send(resc.model)
model = find(resc.model, rid)
props = to_rest_model(model, includes=req.includes)
resp.last_modified = model.updated
resp.serialize(props)
signals.post_req.send(resc.model)
signals.post_req_find.send(resc.model) | Find the model by id & serialize it back | Below is the instruction that describes the task:
### Input:
Find the model by id & serialize it back
### Response:
def on_get(resc, req, resp, rid):
""" Find the model by id & serialize it back """
signals.pre_req.send(resc.model)
signals.pre_req_find.send(resc.model)
model = find(resc.model, rid)
props = to_rest_model(model, includes=req.includes)
resp.last_modified = model.updated
resp.serialize(props)
signals.post_req.send(resc.model)
signals.post_req_find.send(resc.model) |
def from_ast(
pyast_node, node=None, node_cls=None, Node=Node,
iter_fields=ast.iter_fields, AST=ast.AST):
'''Convert the ast tree to a tater tree.
'''
node_cls = node_cls or Node
node = node or node_cls()
name = pyast_node.__class__.__name__
attrs = []
for field, value in iter_fields(pyast_node):
if name == 'Dict':
for key, value in zip(pyast_node.keys, pyast_node.values):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
value = from_ast(item)
elif isinstance(value, AST):
value = from_ast(value)
attrs.append((key.s, value))
else:
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
value = from_ast(item)
elif isinstance(value, AST):
value = from_ast(value)
attrs.append((field, value))
node.update(attrs, type=name)
return node | Convert the ast tree to a tater tree. | Below is the instruction that describes the task:
### Input:
Convert the ast tree to a tater tree.
### Response:
def from_ast(
pyast_node, node=None, node_cls=None, Node=Node,
iter_fields=ast.iter_fields, AST=ast.AST):
'''Convert the ast tree to a tater tree.
'''
node_cls = node_cls or Node
node = node or node_cls()
name = pyast_node.__class__.__name__
attrs = []
for field, value in iter_fields(pyast_node):
if name == 'Dict':
for key, value in zip(pyast_node.keys, pyast_node.values):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
value = from_ast(item)
elif isinstance(value, AST):
value = from_ast(value)
attrs.append((key.s, value))
else:
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
value = from_ast(item)
elif isinstance(value, AST):
value = from_ast(value)
attrs.append((field, value))
node.update(attrs, type=name)
return node |
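A standalone illustration of what ast.iter_fields yields for the Dict node that receives the special keys/values zip above:
import ast

expr = ast.parse("{'a': 1}", mode="eval").body  # an ast.Dict node
for field, value in ast.iter_fields(expr):
    print(field, value)  # 'keys' then 'values', each a list of child nodes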
def get_extra(cls, name=None):
"""Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name.
"""
if not name:
return cls._extra_config
return cls._extra_config.get(name, None) | Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name. | Below is the instruction that describes the task:
### Input:
Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name.
### Response:
def get_extra(cls, name=None):
"""Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name.
"""
if not name:
return cls._extra_config
return cls._extra_config.get(name, None) |
def previous(self):
"""Return a copy of this object as was at its previous state in
history.
Returns None if this object is new (and therefore has no history).
The returned object is always "disconnected", i.e. does not receive
live updates.
"""
return self.model.state.get_entity(
self.entity_type, self.entity_id, self._history_index - 1,
connected=False) | Return a copy of this object as it was at its previous state in
history.
Returns None if this object is new (and therefore has no history).
The returned object is always "disconnected", i.e. does not receive
live updates. | Below is the instruction that describes the task:
### Input:
Return a copy of this object as it was at its previous state in
history.
Returns None if this object is new (and therefore has no history).
The returned object is always "disconnected", i.e. does not receive
live updates.
### Response:
def previous(self):
"""Return a copy of this object as was at its previous state in
history.
Returns None if this object is new (and therefore has no history).
The returned object is always "disconnected", i.e. does not receive
live updates.
"""
return self.model.state.get_entity(
self.entity_type, self.entity_id, self._history_index - 1,
connected=False) |
def blink_figure(self):
"""Blink figure once."""
if self.fig:
self._blink_flag = not self._blink_flag
self.repaint()
if self._blink_flag:
timer = QTimer()
timer.singleShot(40, self.blink_figure) | Blink figure once. | Below is the instruction that describes the task:
### Input:
Blink figure once.
### Response:
def blink_figure(self):
"""Blink figure once."""
if self.fig:
self._blink_flag = not self._blink_flag
self.repaint()
if self._blink_flag:
timer = QTimer()
timer.singleShot(40, self.blink_figure) |
def select_loose_in(pl,k):
'''
pl = ['bcd','xabcxx','x','y']
select_loose_in(pl,'abc')
'''
def cond_func(ele,index,k):
if(type(ele) == type([])):
cond = loose_in(ele,k)
else:
cond = (k in ele)
return(cond)
arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[k])
return(arr) | pl = ['bcd','xabcxx','x','y']
select_loose_in(pl,'abc') | Below is the instruction that describes the task:
### Input:
pl = ['bcd','xabcxx','x','y']
select_loose_in(pl,'abc')
### Response:
def select_loose_in(pl,k):
'''
pl = ['bcd','xabcxx','x','y']
select_loose_in(pl,'abc')
'''
def cond_func(ele,index,k):
if(type(ele) == type([])):
cond = loose_in(ele,k)
else:
cond = (k in ele)
return(cond)
arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[k])
return(arr) |
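The membership test from cond_func in isolation, sketched for the flat-string case only (nested list elements go through loose_in in the original):
pl = ['bcd', 'xabcxx', 'x', 'y']
print([e for e in pl if 'abc' in e])  # ['xabcxx']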
def list_roles(self, service_id=None, limit=None, marker=None):
"""
Returns a list of all global roles for users, optionally limited by
service. Pagination can be handled through the standard 'limit' and
'marker' parameters.
"""
uri = "OS-KSADM/roles"
pagination_items = []
if service_id is not None:
pagination_items.append("serviceId=%s" % service_id)
if limit is not None:
pagination_items.append("limit=%s" % limit)
if marker is not None:
pagination_items.append("marker=%s" % marker)
pagination = "&".join(pagination_items)
if pagination:
uri = "%s?%s" % (uri, pagination)
resp, resp_body = self.method_get(uri)
roles = resp_body.get("roles", [])
return [Role(self, role) for role in roles] | Returns a list of all global roles for users, optionally limited by
service. Pagination can be handled through the standard 'limit' and
'marker' parameters. | Below is the instruction that describes the task:
### Input:
Returns a list of all global roles for users, optionally limited by
service. Pagination can be handled through the standard 'limit' and
'marker' parameters.
### Response:
def list_roles(self, service_id=None, limit=None, marker=None):
"""
Returns a list of all global roles for users, optionally limited by
service. Pagination can be handled through the standard 'limit' and
'marker' parameters.
"""
uri = "OS-KSADM/roles"
pagination_items = []
if service_id is not None:
pagination_items.append("serviceId=%s" % service_id)
if limit is not None:
pagination_items.append("limit=%s" % limit)
if marker is not None:
pagination_items.append("marker=%s" % marker)
pagination = "&".join(pagination_items)
if pagination:
uri = "%s?%s" % (uri, pagination)
resp, resp_body = self.method_get(uri)
roles = resp_body.get("roles", [])
return [Role(self, role) for role in roles] |
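The query-string assembly above in isolation, as a hypothetical helper: only the parameters actually supplied end up in the URI.
def build_uri(base, service_id=None, limit=None, marker=None):
    items = []
    if service_id is not None:
        items.append("serviceId=%s" % service_id)
    if limit is not None:
        items.append("limit=%s" % limit)
    if marker is not None:
        items.append("marker=%s" % marker)
    return base + ("?" + "&".join(items) if items else "")

print(build_uri("OS-KSADM/roles", limit=10))  # OS-KSADM/roles?limit=10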
def make_lib(self, version):
"""
Packs everything into a single lib/ directory.
"""
if os.path.exists(POETRY_LIB_BACKUP):
shutil.rmtree(POETRY_LIB_BACKUP)
# Backup the current installation
if os.path.exists(POETRY_LIB):
shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)
shutil.rmtree(POETRY_LIB)
try:
self._make_lib(version)
except Exception:
if not os.path.exists(POETRY_LIB_BACKUP):
raise
shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)
shutil.rmtree(POETRY_LIB_BACKUP)
raise
finally:
if os.path.exists(POETRY_LIB_BACKUP):
shutil.rmtree(POETRY_LIB_BACKUP) | Packs everything into a single lib/ directory. | Below is the instruction that describes the task:
### Input:
Packs everything into a single lib/ directory.
### Response:
def make_lib(self, version):
"""
Packs everything into a single lib/ directory.
"""
if os.path.exists(POETRY_LIB_BACKUP):
shutil.rmtree(POETRY_LIB_BACKUP)
# Backup the current installation
if os.path.exists(POETRY_LIB):
shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)
shutil.rmtree(POETRY_LIB)
try:
self._make_lib(version)
except Exception:
if not os.path.exists(POETRY_LIB_BACKUP):
raise
shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)
shutil.rmtree(POETRY_LIB_BACKUP)
raise
finally:
if os.path.exists(POETRY_LIB_BACKUP):
shutil.rmtree(POETRY_LIB_BACKUP) |
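The backup/restore discipline above reduced to a generic sketch; the paths and the rebuild callable are hypothetical stand-ins: copy aside, attempt the rebuild, restore on failure, always discard the backup.
import os
import shutil

def safe_rebuild(target, backup, rebuild):
    if os.path.exists(backup):
        shutil.rmtree(backup)  # drop any stale backup
    if os.path.exists(target):
        shutil.copytree(target, backup)
        shutil.rmtree(target)
    try:
        rebuild()
    except Exception:
        if os.path.exists(backup):
            shutil.copytree(backup, target)  # roll back
        raise
    finally:
        if os.path.exists(backup):
            shutil.rmtree(backup)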
def delete_model(self, model):
"""Delete a specific session."""
if SessionActivity.is_current(sid_s=model.sid_s):
flash('You could not remove your current session', 'error')
return
delete_session(sid_s=model.sid_s)
db.session.commit() | Delete a specific session. | Below is the instruction that describes the task:
### Input:
Delete a specific session.
### Response:
def delete_model(self, model):
"""Delete a specific session."""
if SessionActivity.is_current(sid_s=model.sid_s):
flash('You could not remove your current session', 'error')
return
delete_session(sid_s=model.sid_s)
db.session.commit() |
def read_multiple_items(f, container_type, item_type, separator=" "):
""" Extract an iterable from the current line of a file-like object.
Args:
f (file): the file-like object to read from
container_type (type): type of the iterable that will be returned
item_type (type): type of the values that will be elements of the returned iterable
separator (str): the separator between two consecutive items
Returns:
The extracted iterable
Example:
The file "a.input" contains three lines and three comma-separated digits on each::
>>> with open("a.input") as f:
... print(utools.files.read_multiple_items(f, list, int, separator=","))
... print(utools.files.read_multiple_items(f, set, str, separator=","))
... print(utools.files.read_multiple_items(f, tuple, float, separator=","))
...
[1, 2, 3]
{"4", "5", "6"}
(7.0, 8.0, 9.0)
"""
return __read(f, lambda line: container_type(item_type(item) for item in line.split(separator))) | Extract an iterable from the current line of a file-like object.
Args:
f (file): the file-like object to read from
container_type (type): type of the iterable that will be returned
item_type (type): type of the values that will be elements of the returned iterable
separator (str): the separator between two consecutive items
Returns:
The extracted iterable
Example:
The file "a.input" contains three lines and three comma-separated digits on each::
>>> with open("a.input") as f:
... print(utools.files.read_multiple_items(f, list, int, separator=","))
... print(utools.files.read_multiple_items(f, set, str, separator=","))
... print(utools.files.read_multiple_items(f, tuple, float, separator=","))
...
[1, 2, 3]
{"4", "5", "6"}
(7.0, 8.0, 9.0) | Below is the instruction that describes the task:
### Input:
Extract an iterable from the current line of a file-like object.
Args:
f (file): the file-like object to read from
container_type (type): type of the iterable that will be returned
item_type (type): type of the values that will be elements of the returned iterable
separator (str): the separator between two consecutive items
Returns:
The extracted iterable
Example:
The file "a.input" contains three lines and three comma-separated digits on each::
>>> with open("a.input") as f:
... print(utools.files.read_multiple_items(f, list, int, separator=","))
... print(utools.files.read_multiple_items(f, set, str, separator=","))
... print(utools.files.read_multiple_items(f, tuple, float, separator=","))
...
[1, 2, 3]
{"4", "5", "6"}
(7.0, 8.0, 9.0)
### Response:
def read_multiple_items(f, container_type, item_type, separator=" "):
""" Extract an iterable from the current line of a file-like object.
Args:
f (file): the file-like object to read from
container_type (type): type of the iterable that will be returned
item_type (type): type of the values that will be elements of the returned iterable
separator (str): the separator between two consecutive items
Returns:
The extracted iterable
Example:
The file "a.input" contains three lines and three comma-separated digits on each::
>>> with open("a.input") as f:
... print(utools.files.read_multiple_items(f, list, int, separator=","))
... print(utools.files.read_multiple_items(f, set, str, separator=","))
... print(utools.files.read_multiple_items(f, tuple, float, separator=","))
...
[1, 2, 3]
{"4", "5", "6"}
(7.0, 8.0, 9.0)
"""
return __read(f, lambda line: container_type(item_type(item) for item in line.split(separator))) |
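A plausible __read helper that the function above relies on (an assumption; the real helper is defined elsewhere in the module): read one line, strip the newline, parse it.
import io

def __read(f, parse):
    return parse(f.readline().rstrip("\n"))

f = io.StringIO("1,2,3\n")
print(__read(f, lambda line: [int(x) for x in line.split(",")]))  # [1, 2, 3]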
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n) | Create drawables for the line numbers. | Below is the instruction that describes the task:
### Input:
Create drawables for the line numbers.
### Response:
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n) |
def get(cls, group, admin):
"""Get specific GroupAdmin object."""
try:
ga = cls.query.filter_by(
group=group, admin_id=admin.get_id(),
admin_type=resolve_admin_type(admin)).one()
return ga
except Exception:
return None | Get specific GroupAdmin object. | Below is the instruction that describes the task:
### Input:
Get specific GroupAdmin object.
### Response:
def get(cls, group, admin):
"""Get specific GroupAdmin object."""
try:
ga = cls.query.filter_by(
group=group, admin_id=admin.get_id(),
admin_type=resolve_admin_type(admin)).one()
return ga
except Exception:
return None |
def sync(self):
"""
Fetch the list of apps from Marathon, find the domains that require
certificates, and issue certificates for any domains that don't already
have a certificate.
"""
self.log.info('Starting a sync...')
def log_success(result):
self.log.info('Sync completed successfully')
return result
def log_failure(failure):
self.log.failure('Sync failed', failure, LogLevel.error)
return failure
return (self.marathon_client.get_apps()
.addCallback(self._apps_acme_domains)
.addCallback(self._filter_new_domains)
.addCallback(self._issue_certs)
.addCallbacks(log_success, log_failure)) | Fetch the list of apps from Marathon, find the domains that require
certificates, and issue certificates for any domains that don't already
have a certificate. | Below is the instruction that describes the task:
### Input:
Fetch the list of apps from Marathon, find the domains that require
certificates, and issue certificates for any domains that don't already
have a certificate.
### Response:
def sync(self):
"""
Fetch the list of apps from Marathon, find the domains that require
certificates, and issue certificates for any domains that don't already
have a certificate.
"""
self.log.info('Starting a sync...')
def log_success(result):
self.log.info('Sync completed successfully')
return result
def log_failure(failure):
self.log.failure('Sync failed', failure, LogLevel.error)
return failure
return (self.marathon_client.get_apps()
.addCallback(self._apps_acme_domains)
.addCallback(self._filter_new_domains)
.addCallback(self._issue_certs)
.addCallbacks(log_success, log_failure)) |
def contains_list(longer, shorter):
"""Check if longer list starts with shorter list"""
if len(longer) <= len(shorter):
return False
for a, b in zip(shorter, longer):
if a != b:
return False
return True | Check if longer list starts with shorter list | Below is the instruction that describes the task:
### Input:
Check if longer list starts with shorter list
### Response:
def contains_list(longer, shorter):
"""Check if longer list starts with shorter list"""
if len(longer) <= len(shorter):
return False
for a, b in zip(shorter, longer):
if a != b:
return False
return True |
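A usage sketch, assuming contains_list above is in scope; note that zip() truncates to the shorter list, so only the prefix is compared, and equal-length lists fail the strict length guard:
print(contains_list([1, 2, 3], [1, 2]))  # True
print(contains_list([1, 2], [1, 2]))     # False: not strictly longer
print(contains_list([1, 9, 3], [1, 2]))  # False: prefix mismatch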
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run | Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session | Below is the instruction that describes the task:
### Input:
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
### Response:
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run |
def waznlike(word1, wazn):
"""If the word1 is like a wazn (pattern),
the letters must be equal,
the wazn has FEH, AIN, LAM letters.
these are generic letters.
The two words can be fully vocalized, or partially vocalized
@param word1: input word
@type word1: unicode
@param wazn: given word template وزن
@type wazn: unicode
@return: if two words have similar vocalization
@rtype: Boolean
"""
stack1 = stack.Stack(word1)
stack2 = stack.Stack(wazn)
root = stack.Stack()
last1 = stack1.pop()
last2 = stack2.pop()
vowels = HARAKAT
while last1 != None and last2 != None:
if last1 == last2 and last2 not in (FEH, AIN, LAM):
last1 = stack1.pop()
last2 = stack2.pop()
elif last1 not in vowels and last2 in (FEH, AIN, LAM):
root.push(last1)
# ~ print "t"
last1 = stack1.pop()
last2 = stack2.pop()
elif last1 in vowels and last2 not in vowels:
last1 = stack1.pop()
elif last1 not in vowels and last2 in vowels:
last2 = stack2.pop()
else:
break
# reverse the root letters
root.items.reverse()
# ~ print " the root is ", root.items#"".join(root.items)
if not (stack1.is_empty() and stack2.is_empty()):
return False
else:
return True | If the word1 is like a wazn (pattern),
the letters must be equal,
the wazn has FEH, AIN, LAM letters.
these are generic letters.
The two words can be fully vocalized, or partially vocalized
@param word1: input word
@type word1: unicode
@param wazn: given word template وزن
@type wazn: unicode
@return: if two words have similar vocalization
@rtype: Boolean | Below is the instruction that describes the task:
### Input:
If the word1 is like a wazn (pattern),
the letters must be equal,
the wazn has FEH, AIN, LAM letters.
These are generic letters.
The two words can be fully vocalized, or partially vocalized
@param word1: input word
@type word1: unicode
@param wazn: given word template وزن
@type wazn: unicode
@return: if two words have similar vocalization
@rtype: Boolean
### Response:
def waznlike(word1, wazn):
"""If the word1 is like a wazn (pattern),
the letters must be equal,
the wazn has FEH, AIN, LAM letters.
    These are generic letters.
    The two words can be fully vocalized, or partially vocalized
@param word1: input word
@type word1: unicode
@param wazn: given word template وزن
@type wazn: unicode
@return: if two words have similar vocalization
@rtype: Boolean
"""
stack1 = stack.Stack(word1)
stack2 = stack.Stack(wazn)
root = stack.Stack()
last1 = stack1.pop()
last2 = stack2.pop()
vowels = HARAKAT
    while last1 is not None and last2 is not None:
if last1 == last2 and last2 not in (FEH, AIN, LAM):
last1 = stack1.pop()
last2 = stack2.pop()
elif last1 not in vowels and last2 in (FEH, AIN, LAM):
root.push(last1)
# ~ print "t"
last1 = stack1.pop()
last2 = stack2.pop()
elif last1 in vowels and last2 not in vowels:
last1 = stack1.pop()
elif last1 not in vowels and last2 in vowels:
last2 = stack2.pop()
else:
break
# reverse the root letters
root.items.reverse()
# ~ print " the root is ", root.items#"".join(root.items)
if not (stack1.is_empty() and stack2.is_empty()):
return False
else:
return True |
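A short usage sketch for the pattern check above — the calls follow directly from the function, though the claim that it ships in the pyarabic package is an inference from the HARAKAT/FEH/AIN/LAM constants:

# Both words align letter-for-letter with the generic FEH/AIN/LAM slots.
print(waznlike(u'ضارب', u'فاعل'))  # True: ض/ر/ب fill the ف/ع/ل slots, ا matches ا
print(waznlike(u'درس', u'فعل'))    # True: a plain triliteral against the bare pattern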
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='Feature importance', ylabel='Features',
importance_type='split', max_num_features=None,
ignore_zero=True, figsize=None, grid=True,
precision=None, **kwargs):
"""Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Booster's feature_importance is empty.")
tuples = sorted(zip_(feature_name, importance), key=lambda x: x[1])
if ignore_zero:
tuples = [x for x in tuples if x[1] > 0]
if max_num_features is not None and max_num_features > 0:
tuples = tuples[-max_num_features:]
labels, values = zip_(*tuples)
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
for x, y in zip_(values, ylocs):
ax.text(x + 1, y,
_float2str(x, precision) if importance_type == 'gain' else x,
va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax | Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances. | Below is the instruction that describes the task:
### Input:
Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
### Response:
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='Feature importance', ylabel='Features',
importance_type='split', max_num_features=None,
ignore_zero=True, figsize=None, grid=True,
precision=None, **kwargs):
"""Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Booster's feature_importance is empty.")
tuples = sorted(zip_(feature_name, importance), key=lambda x: x[1])
if ignore_zero:
tuples = [x for x in tuples if x[1] > 0]
if max_num_features is not None and max_num_features > 0:
tuples = tuples[-max_num_features:]
labels, values = zip_(*tuples)
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
for x, y in zip_(values, ylocs):
ax.text(x + 1, y,
_float2str(x, precision) if importance_type == 'gain' else x,
va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax |
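A minimal end-to-end sketch on synthetic data (assuming lightgbm and matplotlib are installed; the dataset here is random and purely illustrative):

import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, 100)
bst = lgb.train({'objective': 'binary', 'verbose': -1},
                lgb.Dataset(X, label=y), num_boost_round=10)
# 'gain' importances, trimmed to the top 5 features
ax = lgb.plot_importance(bst, max_num_features=5, importance_type='gain')
plt.show()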
def metadata_converter_help():
"""Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message | Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message | Below is the instruction that describes the task:
### Input:
Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
### Response:
def metadata_converter_help():
"""Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message |
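A usage sketch — note that `to_text()` is an assumed renderer on InaSAFE's messaging.Message and should be checked against the safe.messaging API before relying on it:

message = metadata_converter_help()
print(message.to_text())  # to_text() is assumed; the Message API may differ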
def get_previous(self):
"""Returns the previous :obj:`Gtk.TreeModelRow` or None"""
prev_iter = self.model.iter_previous(self.iter)
if prev_iter:
return TreeModelRow(self.model, prev_iter) | Returns the previous :obj:`Gtk.TreeModelRow` or None | Below is the the instruction that describes the task:
### Input:
Returns the previous :obj:`Gtk.TreeModelRow` or None
### Response:
def get_previous(self):
"""Returns the previous :obj:`Gtk.TreeModelRow` or None"""
prev_iter = self.model.iter_previous(self.iter)
if prev_iter:
return TreeModelRow(self.model, prev_iter) |
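A usage sketch against a small Gtk.ListStore (assuming the PyGObject/pgi overrides, where indexing a model yields a TreeModelRow):

from gi.repository import Gtk

store = Gtk.ListStore(str)
store.append(["first"])
store.append(["second"])
row = store[1]              # TreeModelRow wrapping the second entry
prev = row.get_previous()   # TreeModelRow for the first entry, or None
print(prev[0] if prev is not None else "no previous row")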
def write_backup_meta_data(self):
"""Write the auto backup meta data into the current tmp-storage path"""
auto_backup_meta_file = os.path.join(self._tmp_storage_path, FILE_NAME_AUTO_BACKUP)
storage.storage_utils.write_dict_to_json(self.meta, auto_backup_meta_file) | Write the auto backup meta data into the current tmp-storage path | Below is the the instruction that describes the task:
### Input:
Write the auto backup meta data into the current tmp-storage path
### Response:
def write_backup_meta_data(self):
"""Write the auto backup meta data into the current tmp-storage path"""
auto_backup_meta_file = os.path.join(self._tmp_storage_path, FILE_NAME_AUTO_BACKUP)
storage.storage_utils.write_dict_to_json(self.meta, auto_backup_meta_file) |
def runExperiment():
"""
Experiment 1: Calculate error rate as a function of training sequence numbers
:return:
"""
trainSeqN = [5, 10, 20, 50, 100, 200]
rptPerCondition = 5
correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
for i in xrange(len(trainSeqN)):
for rpt in xrange(rptPerCondition):
train_seed = 1
numTrainSequence = trainSeqN[i]
net = initializeLSTMnet()
net = trainLSTMnet(net, numTrainSequence, seedSeq=train_seed)
(correctRate, missRate, fpRate) = testLSTMnet(net, numTestSequence, seedSeq=train_seed+rpt)
correctRateAll[i, rpt] = correctRate
missRateAll[i, rpt] = missRate
fpRateAll[i, rpt] = fpRate
np.savez('result/reberSequenceLSTM.npz',
correctRateAll=correctRateAll, missRateAll=missRateAll,
fpRateAll=fpRateAll, trainSeqN=trainSeqN)
plt.figure()
plt.subplot(2,2,1)
plt.semilogx(trainSeqN, 100*np.mean(correctRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate - Best Match (%)')
plt.subplot(2,2,2)
plt.semilogx(trainSeqN, 100*np.mean(missRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(trainSeqN, 100*np.mean(fpRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_LSTMperformance.pdf') | Experiment 1: Calculate error rate as a function of training sequence numbers
:return: | Below is the instruction that describes the task:
### Input:
Experiment 1: Calculate error rate as a function of training sequence numbers
:return:
### Response:
def runExperiment():
"""
Experiment 1: Calculate error rate as a function of training sequence numbers
:return:
"""
trainSeqN = [5, 10, 20, 50, 100, 200]
rptPerCondition = 5
correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
for i in xrange(len(trainSeqN)):
for rpt in xrange(rptPerCondition):
train_seed = 1
numTrainSequence = trainSeqN[i]
net = initializeLSTMnet()
net = trainLSTMnet(net, numTrainSequence, seedSeq=train_seed)
(correctRate, missRate, fpRate) = testLSTMnet(net, numTestSequence, seedSeq=train_seed+rpt)
correctRateAll[i, rpt] = correctRate
missRateAll[i, rpt] = missRate
fpRateAll[i, rpt] = fpRate
np.savez('result/reberSequenceLSTM.npz',
correctRateAll=correctRateAll, missRateAll=missRateAll,
fpRateAll=fpRateAll, trainSeqN=trainSeqN)
plt.figure()
plt.subplot(2,2,1)
plt.semilogx(trainSeqN, 100*np.mean(correctRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate - Best Match (%)')
plt.subplot(2,2,2)
plt.semilogx(trainSeqN, 100*np.mean(missRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(trainSeqN, 100*np.mean(fpRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_LSTMperformance.pdf') |
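A follow-up sketch for inspecting the archived results; the key names are grounded in the np.savez call above:

runExperiment()
results = np.load('result/reberSequenceLSTM.npz')
print(results['trainSeqN'])                    # the sweep of training-set sizes
print(results['correctRateAll'].mean(axis=1))  # mean hit rate per condition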
def leaveEvent( self, event ):
"""
Toggles the display for the tracker item.
"""
item = self.trackerItem()
if ( item ):
item.setVisible(False) | Toggles the display for the tracker item. | Below is the instruction that describes the task:
### Input:
Toggles the display for the tracker item.
### Response:
def leaveEvent( self, event ):
"""
Toggles the display for the tracker item.
"""
item = self.trackerItem()
if ( item ):
item.setVisible(False) |
def _gtu8(ins):
""" Compares & pops top 2 operands out of the stack, and checks
if the 1st operand > 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
8 bit unsigned version
"""
output = _8bit_oper(ins.quad[2], ins.quad[3], reversed_=True)
output.append('cp h')
output.append('sbc a, a')
output.append('push af')
return output | Compares & pops top 2 operands out of the stack, and checks
if the 1st operand > 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
8 bit unsigned version | Below is the instruction that describes the task:
### Input:
Compares & pops top 2 operands out of the stack, and checks
if the 1st operand > 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
8 bit unsigned version
### Response:
def _gtu8(ins):
""" Compares & pops top 2 operands out of the stack, and checks
if the 1st operand > 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
8 bit unsigned version
"""
output = _8bit_oper(ins.quad[2], ins.quad[3], reversed_=True)
output.append('cp h')
output.append('sbc a, a')
output.append('push af')
return output |
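As a reading of the emitted Z80 sequence (an interpretation, assuming `reversed_=True` loads the second operand into A and the first into H), here is a Python model of the byte that ends up pushed:

def gtu8_model(op1, op2):
    a, h = op2 & 0xFF, op1 & 0xFF   # operands assumed loaded in reverse order
    carry = a < h                   # CP H sets carry exactly when A < H
    return 0xFF if carry else 0x00  # SBC A, A broadcasts the carry into A

Under this reading, the "true" value pushed is 0xFF rather than literally 1, which still evaluates as true downstream.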