code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def get_queue(cls, name, priority=0, **fields_if_new):
"""
Get, or create, and return the wanted queue.
If the queue is created, fields in fields_if_new will be set for the new
queue.
"""
queue_kwargs = {'name': name, 'priority': priority}
retries = 0
while retries < 10:
retries += 1
try:
queue, created = cls.get_or_connect(**queue_kwargs)
except IndexError:
# Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
# => retry
continue
except ValueError:
# more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)
try:
queue = cls.collection(**queue_kwargs).instances()[0]
except IndexError:
# but no more now ?!
# => retry
continue
else:
created = False
# ok we have our queue, stop now
break
if created and fields_if_new:
queue.set_fields(**fields_if_new)
return queue | Get, or create, and return the wanted queue.
If the queue is created, fields in fields_if_new will be set for the new
queue. | Below is the instruction that describes the task:
### Input:
Get, or create, and return the wanted queue.
If the queue is created, fields in fields_if_new will be set for the new
queue.
### Response:
def get_queue(cls, name, priority=0, **fields_if_new):
"""
Get, or create, and return the wanted queue.
If the queue is created, fields in fields_if_new will be set for the new
queue.
"""
queue_kwargs = {'name': name, 'priority': priority}
retries = 0
while retries < 10:
retries += 1
try:
queue, created = cls.get_or_connect(**queue_kwargs)
except IndexError:
# Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
# => retry
continue
except ValueError:
# more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)
try:
queue = cls.collection(**queue_kwargs).instances()[0]
except IndexError:
# but no more now ?!
# => retry
continue
else:
created = False
# ok we have our queue, stop now
break
if created and fields_if_new:
queue.set_fields(**fields_if_new)
return queue |
def _to_chimera(M, N, L, q):
"Converts a qubit's linear index to chimera coordinates."
return (q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L) | Converts a qubit's linear index to chimera coordinates. | Below is the instruction that describes the task:
### Input:
Converts a qubit's linear index to chimera coordinates.
### Response:
def _to_chimera(M, N, L, q):
"Converts a qubit's linear index to chimera coordinates."
return (q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L) |
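A quick way to sanity-check this mapping is to pair it with the inverse formula implied by the same arithmetic. The sketch below is illustrative only: `from_chimera` is a hypothetical helper, not part of the original module, and the M, N, L values are arbitrary.

```python
# Round-trip check of the linear index -> (row, column, u, k) mapping for an
# M x N Chimera graph with shore size L.
def to_chimera(M, N, L, q):
    return (q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L)

def from_chimera(M, N, L, coord):
    # hypothetical inverse, written from the same arithmetic
    i, j, u, k = coord
    return ((i * N + j) * 2 + u) * L + k

M, N, L = 3, 2, 4
assert all(from_chimera(M, N, L, to_chimera(M, N, L, q)) == q
           for q in range(M * N * 2 * L))
```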
def parse(
version, strict=False # type: str # type: bool
): # type:(...) -> Union[Version, LegacyVersion]
"""
Parse the given version string and return either a :class:`Version` object
or a LegacyVersion object depending on if the given version is
a valid PEP 440 version or a legacy version.
If strict=True only PEP 440 versions will be accepted.
"""
try:
return Version(version)
except InvalidVersion:
if strict:
raise
return LegacyVersion(version) | Parse the given version string and return either a :class:`Version` object
or a LegacyVersion object depending on if the given version is
a valid PEP 440 version or a legacy version.
If strict=True only PEP 440 versions will be accepted. | Below is the instruction that describes the task:
### Input:
Parse the given version string and return either a :class:`Version` object
or a LegacyVersion object depending on if the given version is
a valid PEP 440 version or a legacy version.
If strict=True only PEP 440 versions will be accepted.
### Response:
def parse(
version, strict=False # type: str # type: bool
): # type:(...) -> Union[Version, LegacyVersion]
"""
Parse the given version string and return either a :class:`Version` object
or a LegacyVersion object depending on if the given version is
a valid PEP 440 version or a legacy version.
If strict=True only PEP 440 versions will be accepted.
"""
try:
return Version(version)
except InvalidVersion:
if strict:
raise
return LegacyVersion(version) |
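For context, a hedged usage sketch of the behaviour described above. It assumes an older `packaging` release (before 22.0) whose `packaging.version` still provides `LegacyVersion`, which is the API this snippet mirrors; the logic is repeated only to keep the sketch self-contained.

```python
from packaging.version import InvalidVersion, LegacyVersion, Version

def parse(version, strict=False):
    # same fallback logic as the snippet above
    try:
        return Version(version)
    except InvalidVersion:
        if strict:
            raise
        return LegacyVersion(version)

print(type(parse("1.0.3")).__name__)          # Version
print(type(parse("not a version")).__name__)  # LegacyVersion
# parse("not a version", strict=True)         # would raise InvalidVersion
```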
def get_all_constants():
"""
Get list of all uppercase, non-private globals (doesn't start with ``_``).
Returns:
list: Uppercase names defined in `globals()` (variables from this \
module).
"""
return filter(
lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,
filter( # filter _PRIVATE variables
lambda x: not x.startswith("_"),
globals()
)
) | Get list of all uppercase, non-private globals (doesn't start with ``_``).
Returns:
list: Uppercase names defined in `globals()` (variables from this \
module). | Below is the instruction that describes the task:
### Input:
Get list of all uppercase, non-private globals (doesn't start with ``_``).
Returns:
list: Uppercase names defined in `globals()` (variables from this \
module).
### Response:
def get_all_constants():
"""
Get list of all uppercase, non-private globals (doesn't start with ``_``).
Returns:
list: Uppercase names defined in `globals()` (variables from this \
module).
"""
return filter(
lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,
filter( # filter _PRIVATE variables
lambda x: not x.startswith("_"),
globals()
)
) |
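One thing worth noting: on Python 3 the nested `filter` calls return a lazy iterator rather than the documented list, so callers need `list(...)` around the result. A comprehension-based equivalent is sketched below; `_ALLOWED` is a placeholder for the module's real whitelist of constant types.

```python
_ALLOWED = (str, int, float, tuple, list, dict)   # illustrative whitelist

def get_all_constants_py3():
    # eager, list-returning equivalent of the filter/filter chain above
    return [
        key
        for key, value in globals().items()
        if not key.startswith("_")
        and key.upper() == key
        and type(value) in _ALLOWED
    ]
```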
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("POT estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters]) | Get parameter names for the estimator | Below is the instruction that describes the task:
### Input:
Get parameter names for the estimator
### Response:
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("POT estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters]) |
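The same introspection trick works on any class whose constructor spells out its keyword parameters. The toy example below shows the core of it, assuming the `signature` name in the snippet is `inspect.signature` (as in scikit-learn's `BaseEstimator`).

```python
from inspect import signature

class ToyEstimator:
    def __init__(self, alpha=1.0, max_iter=100, verbose=False):
        self.alpha, self.max_iter, self.verbose = alpha, max_iter, verbose

params = sorted(
    p.name
    for p in signature(ToyEstimator.__init__).parameters.values()
    if p.name != 'self' and p.kind != p.VAR_KEYWORD
)
print(params)   # ['alpha', 'max_iter', 'verbose']
```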
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close() | Execute the tasky/asyncio event loop for `duration` seconds. | Below is the instruction that describes the task:
### Input:
Execute the tasky/asyncio event loop for `duration` seconds.
### Response:
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close() |
def openflow_controller_controller_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
openflow_controller = ET.SubElement(config, "openflow-controller", xmlns="urn:brocade.com:mgmt:brocade-openflow")
controller_name = ET.SubElement(openflow_controller, "controller-name")
controller_name.text = kwargs.pop('controller_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def openflow_controller_controller_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
openflow_controller = ET.SubElement(config, "openflow-controller", xmlns="urn:brocade.com:mgmt:brocade-openflow")
controller_name = ET.SubElement(openflow_controller, "controller-name")
controller_name.text = kwargs.pop('controller_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
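Stripped of the callback plumbing, the method just assembles a small NETCONF-style XML payload. The standalone sketch below rebuilds that payload with the standard library; only the namespace URI is taken from the snippet, and the controller name is a placeholder.

```python
import xml.etree.ElementTree as ET

config = ET.Element("config")
ofc = ET.SubElement(config, "openflow-controller",
                    xmlns="urn:brocade.com:mgmt:brocade-openflow")
name = ET.SubElement(ofc, "controller-name")
name.text = "controller-1"                 # placeholder value
print(ET.tostring(config).decode())
```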
def run_mp(songs):
"""
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
"""
stats = Stats()
if CONFIG['debug']:
good = open('found', 'w')
bad = open('notfound', 'w')
logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount'])
chunksize = math.ceil(len(songs) / os.cpu_count())
try:
with Pool(CONFIG['jobcount']) as pool:
for result in pool.imap_unordered(get_lyrics, songs, chunksize):
if result is None:
continue
for source, runtime in result.runtimes.items():
stats.add_result(source, result.source == source, runtime)
found = process_result(result)
if CONFIG['debug']:
if found:
good.write(f'{id_source(source)}: {result.song}\n')
good.flush()
else:
bad.write(str(result.song) + '\n')
bad.flush()
finally:
if CONFIG['debug']:
good.close()
bad.close()
return stats | Concurrently calls get_lyrics to fetch the lyrics of a large list of songs. | Below is the instruction that describes the task:
### Input:
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
### Response:
def run_mp(songs):
"""
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
"""
stats = Stats()
if CONFIG['debug']:
good = open('found', 'w')
bad = open('notfound', 'w')
logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount'])
chunksize = math.ceil(len(songs) / os.cpu_count())
try:
with Pool(CONFIG['jobcount']) as pool:
for result in pool.imap_unordered(get_lyrics, songs, chunksize):
if result is None:
continue
for source, runtime in result.runtimes.items():
stats.add_result(source, result.source == source, runtime)
found = process_result(result)
if CONFIG['debug']:
if found:
good.write(f'{id_source(source)}: {result.song}\n')
good.flush()
else:
bad.write(str(result.song) + '\n')
bad.flush()
finally:
if CONFIG['debug']:
good.close()
bad.close()
return stats |
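The concurrency core of this function is `Pool.imap_unordered` with an explicit `chunksize`, which yields results as soon as any worker finishes. A minimal, self-contained sketch of that pattern, with a trivial stand-in for `get_lyrics`:

```python
import math
import os
from multiprocessing import Pool

def fetch(song):                  # stand-in for get_lyrics
    return song.upper()

if __name__ == "__main__":
    songs = ["song %d" % i for i in range(100)]
    chunksize = math.ceil(len(songs) / os.cpu_count())
    with Pool() as pool:
        for result in pool.imap_unordered(fetch, songs, chunksize):
            pass                  # process_result(result) in the real code
```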
def _data_is_binary(self, data):
"""Check if the data contains binary components."""
if isinstance(data, six.binary_type):
return True
elif isinstance(data, list):
return functools.reduce(
lambda a, b: a or b, [self._data_is_binary(item)
for item in data], False)
elif isinstance(data, dict):
return functools.reduce(
lambda a, b: a or b, [self._data_is_binary(item)
for item in six.itervalues(data)],
False)
else:
return False | Check if the data contains binary components. | Below is the instruction that describes the task:
### Input:
Check if the data contains binary components.
### Response:
def _data_is_binary(self, data):
"""Check if the data contains binary components."""
if isinstance(data, six.binary_type):
return True
elif isinstance(data, list):
return functools.reduce(
lambda a, b: a or b, [self._data_is_binary(item)
for item in data], False)
elif isinstance(data, dict):
return functools.reduce(
lambda a, b: a or b, [self._data_is_binary(item)
for item in six.itervalues(data)],
False)
else:
return False |
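The `functools.reduce(lambda a, b: a or b, ...)` construction is simply an eager OR over the recursive results; a shorter equivalent using `any()` (with `bytes` standing in for `six.binary_type` on Python 3) looks like this:

```python
def data_is_binary(data):
    if isinstance(data, bytes):
        return True
    if isinstance(data, list):
        return any(data_is_binary(item) for item in data)
    if isinstance(data, dict):
        return any(data_is_binary(item) for item in data.values())
    return False

print(data_is_binary({"a": [1, "x", b"\x00"]}))   # True
print(data_is_binary(["plain", "strings"]))       # False
```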
def select_options(self, options_prefix):
""" Select options from this selection, that are started with the specified prefix
:param options_prefix: name prefix of options that should be selected
:return: WConfigSelection
"""
return WConfigSelection(
self.config(), self.section(), self.option_prefix() + options_prefix
) | Select options from this selection, that are started with the specified prefix
:param options_prefix: name prefix of options that should be selected
:return: WConfigSelection | Below is the instruction that describes the task:
### Input:
Select options from this selection, that are started with the specified prefix
:param options_prefix: name prefix of options that should be selected
:return: WConfigSelection
### Response:
def select_options(self, options_prefix):
""" Select options from this selection, that are started with the specified prefix
:param options_prefix: name prefix of options that should be selected
:return: WConfigSelection
"""
return WConfigSelection(
self.config(), self.section(), self.option_prefix() + options_prefix
) |
def create_file_vdev(size, *vdevs):
'''
Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return.
'''
ret = OrderedDict()
err = OrderedDict()
_mkfile_cmd = salt.utils.path.which('mkfile')
for vdev in vdevs:
if os.path.isfile(vdev):
ret[vdev] = 'existed'
else:
res = __salt__['cmd.run_all'](
'{mkfile} {size} {vdev}'.format(
mkfile=_mkfile_cmd,
size=size,
vdev=vdev,
),
python_shell=False,
)
if res['retcode'] != 0:
if 'stderr' in res and ':' in res['stderr']:
ret[vdev] = 'failed'
err[vdev] = ":".join(res['stderr'].strip().split(':')[1:])
else:
ret[vdev] = 'created'
if err:
ret['error'] = err
return ret | Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return. | Below is the instruction that describes the task:
### Input:
Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return.
### Response:
def create_file_vdev(size, *vdevs):
'''
Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return.
'''
ret = OrderedDict()
err = OrderedDict()
_mkfile_cmd = salt.utils.path.which('mkfile')
for vdev in vdevs:
if os.path.isfile(vdev):
ret[vdev] = 'existed'
else:
res = __salt__['cmd.run_all'](
'{mkfile} {size} {vdev}'.format(
mkfile=_mkfile_cmd,
size=size,
vdev=vdev,
),
python_shell=False,
)
if res['retcode'] != 0:
if 'stderr' in res and ':' in res['stderr']:
ret[vdev] = 'failed'
err[vdev] = ":".join(res['stderr'].strip().split(':')[1:])
else:
ret[vdev] = 'created'
if err:
ret['error'] = err
return ret |
def make_report(plots, settings):
'''
Creates a fat html report based on the previously created files
plots is a list of Plot objects defined by a path and title
statsfile is the file to which the stats have been saved,
which is parsed to a table (rather dodgy)
'''
logging.info("Writing html report.")
html_content = ['<body>']
# Hyperlink Table of Contents panel
html_content.append('<div class="panel panelC">')
if settings["filtered"]:
html_content.append(
'<p><strong><a href="#stats0">Summary Statistics prior to filtering</a></strong></p>')
html_content.append(
'<p><strong><a href="#stats1">Summary Statistics after filtering</a></strong></p>')
else:
html_content.append(
'<p><strong><a href="#stats0">Summary Statistics</a></strong></p>')
html_content.append('<p><strong><a href="#plots">Plots</a></strong></p>')
html_content.extend(['<p style="margin-left:20px"><a href="#' +
p.title.replace(' ', '_') + '">' + p.title + '</a></p>' for p in plots])
html_content.append('</div>')
# The report itself: stats
html_content.append('<div class="panel panelM"> <h1>NanoPlot report</h1>')
if settings["filtered"]:
html_content.append('<h2 id="stats0">Summary statistics prior to filtering</h2>')
html_content.append(utils.stats2html(settings["statsfile"][0]))
html_content.append('<h2 id="stats1">Summary statistics after filtering</h2>')
html_content.append(utils.stats2html(settings["statsfile"][1]))
else:
html_content.append('<h2 id="stats0">Summary statistics</h2>')
html_content.append(utils.stats2html(settings["statsfile"][0]))
# The report itself: plots
html_content.append('<h2 id="plots">Plots</h2>')
for plot in plots:
html_content.append('\n<h3 id="' + plot.title.replace(' ', '_') + '">' +
plot.title + '</h3>\n' + plot.encode())
html_content.append('\n<br>\n<br>\n<br>\n<br>')
html_body = '\n'.join(html_content) + '</div></body></html>'
html_str = utils.html_head + html_body
htmlreport = settings["path"] + "NanoPlot-report.html"
with open(htmlreport, "w") as html_file:
html_file.write(html_str)
return htmlreport | Creates a fat html report based on the previously created files
plots is a list of Plot objects defined by a path and title
statsfile is the file to which the stats have been saved,
which is parsed to a table (rather dodgy) | Below is the instruction that describes the task:
### Input:
Creates a fat html report based on the previously created files
plots is a list of Plot objects defined by a path and title
statsfile is the file to which the stats have been saved,
which is parsed to a table (rather dodgy)
### Response:
def make_report(plots, settings):
'''
Creates a fat html report based on the previously created files
plots is a list of Plot objects defined by a path and title
statsfile is the file to which the stats have been saved,
which is parsed to a table (rather dodgy)
'''
logging.info("Writing html report.")
html_content = ['<body>']
# Hyperlink Table of Contents panel
html_content.append('<div class="panel panelC">')
if settings["filtered"]:
html_content.append(
'<p><strong><a href="#stats0">Summary Statistics prior to filtering</a></strong></p>')
html_content.append(
'<p><strong><a href="#stats1">Summary Statistics after filtering</a></strong></p>')
else:
html_content.append(
'<p><strong><a href="#stats0">Summary Statistics</a></strong></p>')
html_content.append('<p><strong><a href="#plots">Plots</a></strong></p>')
html_content.extend(['<p style="margin-left:20px"><a href="#' +
p.title.replace(' ', '_') + '">' + p.title + '</a></p>' for p in plots])
html_content.append('</div>')
# The report itself: stats
html_content.append('<div class="panel panelM"> <h1>NanoPlot report</h1>')
if settings["filtered"]:
html_content.append('<h2 id="stats0">Summary statistics prior to filtering</h2>')
html_content.append(utils.stats2html(settings["statsfile"][0]))
html_content.append('<h2 id="stats1">Summary statistics after filtering</h2>')
html_content.append(utils.stats2html(settings["statsfile"][1]))
else:
html_content.append('<h2 id="stats0">Summary statistics</h2>')
html_content.append(utils.stats2html(settings["statsfile"][0]))
# The report itself: plots
html_content.append('<h2 id="plots">Plots</h2>')
for plot in plots:
html_content.append('\n<h3 id="' + plot.title.replace(' ', '_') + '">' +
plot.title + '</h3>\n' + plot.encode())
html_content.append('\n<br>\n<br>\n<br>\n<br>')
html_body = '\n'.join(html_content) + '</div></body></html>'
html_str = utils.html_head + html_body
htmlreport = settings["path"] + "NanoPlot-report.html"
with open(htmlreport, "w") as html_file:
html_file.write(html_str)
return htmlreport |
def sentence(self):
"""
The sentence related to this mention
:getter: returns the sentence this mention relates to
:type: corenlp_xml.document.Sentence
"""
if self._sentence is None:
sentences = self._element.xpath('sentence/text()')
if len(sentences) > 0:
self._sentence = self._coref.document.get_sentence_by_id(int(sentences[0]))
return self._sentence | The sentence related to this mention
:getter: returns the sentence this mention relates to
:type: corenlp_xml.document.Sentence | Below is the instruction that describes the task:
### Input:
The sentence related to this mention
:getter: returns the sentence this mention relates to
:type: corenlp_xml.document.Sentence
### Response:
def sentence(self):
"""
The sentence related to this mention
:getter: returns the sentence this mention relates to
:type: corenlp_xml.document.Sentence
"""
if self._sentence is None:
sentences = self._element.xpath('sentence/text()')
if len(sentences) > 0:
self._sentence = self._coref.document.get_sentence_by_id(int(sentences[0]))
return self._sentence |
def apply(self, resource):
"""
Apply filter to resource
:param resource: Image.Image
:return: Image.Image
"""
if not isinstance(resource, Image.Image):
raise ValueError('Unknown resource format')
resource_format = resource.format
if resource.mode != 'RGBA': # pragma: no cover
resource = resource.convert('RGBA')
layer = Image.new('RGBA', resource.size, (0, 0, 0, 0))
image, left, upper = getattr(self, '_' + self.position + '_position')(resource)
layer.paste(image, (left, upper))
image = Image.composite(layer, resource, layer)
image.format = resource_format
return image | Apply filter to resource
:param resource: Image.Image
:return: Image.Image | Below is the instruction that describes the task:
### Input:
Apply filter to resource
:param resource: Image.Image
:return: Image.Image
### Response:
def apply(self, resource):
"""
Apply filter to resource
:param resource: Image.Image
:return: Image.Image
"""
if not isinstance(resource, Image.Image):
raise ValueError('Unknown resource format')
resource_format = resource.format
if resource.mode != 'RGBA': # pragma: no cover
resource = resource.convert('RGBA')
layer = Image.new('RGBA', resource.size, (0, 0, 0, 0))
image, left, upper = getattr(self, '_' + self.position + '_position')(resource)
layer.paste(image, (left, upper))
image = Image.composite(layer, resource, layer)
image.format = resource_format
return image |
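A bare-bones Pillow sketch of the same paste-then-composite idea, with synthetic images instead of a configured watermark and position (assumes Pillow is installed; the offset and filenames are placeholders):

```python
from PIL import Image

base = Image.new("RGBA", (200, 100), (255, 255, 255, 255))
mark = Image.new("RGBA", (50, 30), (255, 0, 0, 128))     # semi-transparent red
layer = Image.new("RGBA", base.size, (0, 0, 0, 0))
layer.paste(mark, (20, 20))                              # placeholder offset
result = Image.composite(layer, base, layer)             # layer's alpha as mask
result.save("watermarked.png")
```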
def count_between(self, min_score=None, max_score=None):
"""
Returns the number of members whose score is between *min_score* and
*max_score* (inclusive).
"""
min_score = float('-inf') if min_score is None else float(min_score)
max_score = float('inf') if max_score is None else float(max_score)
return self.redis.zcount(self.key, min_score, max_score) | Returns the number of members whose score is between *min_score* and
*max_score* (inclusive). | Below is the instruction that describes the task:
### Input:
Returns the number of members whose score is between *min_score* and
*max_score* (inclusive).
### Response:
def count_between(self, min_score=None, max_score=None):
"""
Returns the number of members whose score is between *min_score* and
*max_score* (inclusive).
"""
min_score = float('-inf') if min_score is None else float(min_score)
max_score = float('inf') if max_score is None else float(max_score)
return self.redis.zcount(self.key, min_score, max_score) |
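Under the hood this is a single `ZCOUNT`. A hedged redis-py sketch, assuming a local Redis server and an illustrative key name:

```python
import redis

r = redis.Redis()
r.zadd("scores", {"alice": 10, "bob": 25, "carol": 40})
print(r.zcount("scores", 20, float("inf")))              # 2  (bob, carol)
print(r.zcount("scores", float("-inf"), float("inf")))   # 3  (no bounds given)
```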
def account_product(self, product_id):
''' a method to retrieve details about a particular account product
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"productId": "3300",
"productName": "Capital One 360 Money Market Account",
"cdTerms": [
"12M"
],
"annualPercentageYieldDetails": {
"annualPercentageYieldType": "simple",
"annualPercentageYield": 1.4,
"tieredAnnualPercentageYield": [
{
"tierDescription": "$0 - $9,999.99",
"annualPercentageYield": 1.4
}
],
"termBasedAnnualPercentageYield": [
{
"term": "6M",
"annualPercentageYield": 1.2
}
]
},
"disclosures": {
"productDisclosureUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#360savingsdisclosure",
"termsAndConditionsUrl": "https://www.capitalone.com/online-money-market-account/disclosures/#360moneymarketagreement",
"electronicFundTransferDisclosureUrl": "https://www.capitalone.com/cds/online-cds/disclosures/#electronicfundtransferdisclosurestatement",
"privacyPolicyUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#privacypolicy",
"wireTransferAgreementUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#wirefundstransferdisclosurestatement",
"paperlessAgreementUrl": "https://www.capitalone.com/terms_eddn",
"fraudProtectionAgreementUrl": "https://www.capitalone.com/terms-personal-data",
"tcpaDisclosureContent": "If number(s) provided above is(are) mobile phone number(s), it is (they are) my mobile phone number(s), by clicking on the button below, I consent to receive autodialed and prerecorded/artificial calls , including texts, relating to my relationship with Capital One (which may include handling, servicing, and billing for any of my accounts). Message and Data rates may apply. You can stop these types of messages by replying STOP in response to a text message, or by following any other instructions contained in the time-sensitive call.\n[Radio button] You can call or text me through automated means\n[Radio button] You can only contact me through non-automated mean"
}
}
}
'''
title = '%s.account_product' % self.__class__.__name__
# validate inputs
input_fields = {
'product_id': product_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct url
url = self.deposits_endpoint + 'account-products/%s' % product_id
# construct method specific errors
error_map = {
404: 'Not Found. No products found for the provided productId.'
}
# send request
details = self._requests(url, errors=error_map)
return details | a method to retrieve details about a particular account product
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"productId": "3300",
"productName": "Capital One 360 Money Market Account",
"cdTerms": [
"12M"
],
"annualPercentageYieldDetails": {
"annualPercentageYieldType": "simple",
"annualPercentageYield": 1.4,
"tieredAnnualPercentageYield": [
{
"tierDescription": "$0 - $9,999.99",
"annualPercentageYield": 1.4
}
],
"termBasedAnnualPercentageYield": [
{
"term": "6M",
"annualPercentageYield": 1.2
}
]
},
"disclosures": {
"productDisclosureUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#360savingsdisclosure",
"termsAndConditionsUrl": "https://www.capitalone.com/online-money-market-account/disclosures/#360moneymarketagreement",
"electronicFundTransferDisclosureUrl": "https://www.capitalone.com/cds/online-cds/disclosures/#electronicfundtransferdisclosurestatement",
"privacyPolicyUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#privacypolicy",
"wireTransferAgreementUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#wirefundstransferdisclosurestatement",
"paperlessAgreementUrl": "https://www.capitalone.com/terms_eddn",
"fraudProtectionAgreementUrl": "https://www.capitalone.com/terms-personal-data",
"tcpaDisclosureContent": "If number(s) provided above is(are) mobile phone number(s), it is (they are) my mobile phone number(s), by clicking on the button below, I consent to receive autodialed and prerecorded/artificial calls , including texts, relating to my relationship with Capital One (which may include handling, servicing, and billing for any of my accounts). Message and Data rates may apply. You can stop these types of messages by replying STOP in response to a text message, or by following any other instructions contained in the time-sensitive call.\n[Radio button] You can call or text me through automated means\n[Radio button] You can only contact me through non-automated mean"
}
}
} | Below is the instruction that describes the task:
### Input:
a method to retrieve details about a particular account product
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"productId": "3300",
"productName": "Capital One 360 Money Market Account",
"cdTerms": [
"12M"
],
"annualPercentageYieldDetails": {
"annualPercentageYieldType": "simple",
"annualPercentageYield": 1.4,
"tieredAnnualPercentageYield": [
{
"tierDescription": "$0 - $9,999.99",
"annualPercentageYield": 1.4
}
],
"termBasedAnnualPercentageYield": [
{
"term": "6M",
"annualPercentageYield": 1.2
}
]
},
"disclosures": {
"productDisclosureUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#360savingsdisclosure",
"termsAndConditionsUrl": "https://www.capitalone.com/online-money-market-account/disclosures/#360moneymarketagreement",
"electronicFundTransferDisclosureUrl": "https://www.capitalone.com/cds/online-cds/disclosures/#electronicfundtransferdisclosurestatement",
"privacyPolicyUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#privacypolicy",
"wireTransferAgreementUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#wirefundstransferdisclosurestatement",
"paperlessAgreementUrl": "https://www.capitalone.com/terms_eddn",
"fraudProtectionAgreementUrl": "https://www.capitalone.com/terms-personal-data",
"tcpaDisclosureContent": "If number(s) provided above is(are) mobile phone number(s), it is (they are) my mobile phone number(s), by clicking on the button below, I consent to receive autodialed and prerecorded/artificial calls , including texts, relating to my relationship with Capital One (which may include handling, servicing, and billing for any of my accounts). Message and Data rates may apply. You can stop these types of messages by replying STOP in response to a text message, or by following any other instructions contained in the time-sensitive call.\n[Radio button] You can call or text me through automated means\n[Radio button] You can only contact me through non-automated mean"
}
}
}
### Response:
def account_product(self, product_id):
''' a method to retrieve details about a particular account product
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"productId": "3300",
"productName": "Capital One 360 Money Market Account",
"cdTerms": [
"12M"
],
"annualPercentageYieldDetails": {
"annualPercentageYieldType": "simple",
"annualPercentageYield": 1.4,
"tieredAnnualPercentageYield": [
{
"tierDescription": "$0 - $9,999.99",
"annualPercentageYield": 1.4
}
],
"termBasedAnnualPercentageYield": [
{
"term": "6M",
"annualPercentageYield": 1.2
}
]
},
"disclosures": {
"productDisclosureUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#360savingsdisclosure",
"termsAndConditionsUrl": "https://www.capitalone.com/online-money-market-account/disclosures/#360moneymarketagreement",
"electronicFundTransferDisclosureUrl": "https://www.capitalone.com/cds/online-cds/disclosures/#electronicfundtransferdisclosurestatement",
"privacyPolicyUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#privacypolicy",
"wireTransferAgreementUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#wirefundstransferdisclosurestatement",
"paperlessAgreementUrl": "https://www.capitalone.com/terms_eddn",
"fraudProtectionAgreementUrl": "https://www.capitalone.com/terms-personal-data",
"tcpaDisclosureContent": "If number(s) provided above is(are) mobile phone number(s), it is (they are) my mobile phone number(s), by clicking on the button below, I consent to receive autodialed and prerecorded/artificial calls , including texts, relating to my relationship with Capital One (which may include handling, servicing, and billing for any of my accounts). Message and Data rates may apply. You can stop these types of messages by replying STOP in response to a text message, or by following any other instructions contained in the time-sensitive call.\n[Radio button] You can call or text me through automated means\n[Radio button] You can only contact me through non-automated mean"
}
}
}
'''
title = '%s.account_product' % self.__class__.__name__
# validate inputs
input_fields = {
'product_id': product_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct url
url = self.deposits_endpoint + 'account-products/%s' % product_id
# construct method specific errors
error_map = {
404: 'Not Found. No products found for the provided productId.'
}
# send request
details = self._requests(url, errors=error_map)
return details |
def _set_defaults(self):
"""
Set default values to fields. We assume that they are not yet populated
as this method is called just after creation of a new pk.
"""
for field_name in self._fields:
if field_name in self._init_fields:
continue
field = self.get_field(field_name)
if hasattr(field, "default"):
field.proxy_set(field.default) | Set default values to fields. We assume that they are not yet populated
as this method is called just after creation of a new pk. | Below is the the instruction that describes the task:
### Input:
Set default values to fields. We assume that they are not yet populated
as this method is called just after creation of a new pk.
### Response:
def _set_defaults(self):
"""
Set default values to fields. We assume that they are not yet populated
as this method is called just after creation of a new pk.
"""
for field_name in self._fields:
if field_name in self._init_fields:
continue
field = self.get_field(field_name)
if hasattr(field, "default"):
field.proxy_set(field.default) |
def dispatch(self, **changes):
"""
Patch the swarm with changes and then trigger the swarm.
"""
self.patch(**changes)
trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
resp = self._vr.session.post(trigger_url)
resp.raise_for_status()
try:
return resp.json()
except ValueError:
return None | Patch the swarm with changes and then trigger the swarm. | Below is the the instruction that describes the task:
### Input:
Patch the swarm with changes and then trigger the swarm.
### Response:
def dispatch(self, **changes):
"""
Patch the swarm with changes and then trigger the swarm.
"""
self.patch(**changes)
trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
resp = self._vr.session.post(trigger_url)
resp.raise_for_status()
try:
return resp.json()
except ValueError:
return None |
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
'''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_grouped = x.groupby(_group_col)[_val_col]
n = x_grouped.count()
k = groups.size
def get_ties(x):
t = x.value_counts().values
c = np.sum((t ** 3 - t) / 12.)
return c
def compare(i, j):
ni = n.loc[i]
nj = n.loc[j]
x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
r = x_raw.groupby(_group_col)['ranks'].sum().loc[[i, j]]
u = np.array([nj * ni + (nj * (nj + 1) / 2),
nj * ni + (ni * (ni + 1) / 2)]) - r
u_min = np.min(u)
s = ni + nj
var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
return p
vs = np.zeros((k, k))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(k), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups') | Below is the instruction that describes the task:
### Input:
Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
### Response:
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
'''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_grouped = x.groupby(_group_col)[_val_col]
n = x_grouped.count()
k = groups.size
def get_ties(x):
t = x.value_counts().values
c = np.sum((t ** 3 - t) / 12.)
return c
def compare(i, j):
ni = n.loc[i]
nj = n.loc[j]
x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
r = x_raw.groupby(_group_col)['ranks'].sum().loc[[i, j]]
u = np.array([nj * ni + (nj * (nj + 1) / 2),
nj * ni + (ni * (ni + 1) / 2)]) - r
u_min = np.min(u)
s = ni + nj
var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
return p
vs = np.zeros((k, k))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(k), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) |
def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while six.indexbytes(buffer, pos) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos) | Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python. | Below is the instruction that describes the task:
### Input:
Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
### Response:
def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while six.indexbytes(buffer, pos) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos) |
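What those raw tag bytes contain is an ordinary protobuf varint: the low three bits are the wire type and the remaining bits are the field number. A small decoding sketch, illustrative only and not part of the protobuf runtime:

```python
def decode_varint(buf, pos):
    result = shift = 0
    while True:
        byte = buf[pos]
        result |= (byte & 0x7F) << shift
        pos += 1
        if not byte & 0x80:          # high bit clear -> last byte of the varint
            return result, pos
        shift += 7

tag, _ = decode_varint(b"\x08", 0)   # field 1, wire type 0 (varint)
print(tag >> 3, tag & 0x07)          # 1 0
```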
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
if file_entry.link:
# Determine the timezone based on the file path.
_, _, time_zone = file_entry.link.partition('zoneinfo/')
else:
# Determine the timezone based on the timezone information file.
file_object = file_entry.GetFileObject()
time_zone = None
try:
time_zone_file = tz.tzfile(file_object)
date_time = datetime.datetime(2017, 1, 1)
time_zone = time_zone_file.tzname(date_time)
except ValueError:
# TODO: add and store preprocessing errors.
logger.error('Unable to read time zone information file.')
finally:
file_object.close()
# TODO: check if time zone is set in knowledge base.
if time_zone:
try:
knowledge_base.SetTimeZone(time_zone)
except ValueError:
# TODO: add and store preprocessing errors.
logger.error('Unable to set time zone in knowledge base.') | Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails. | Below is the instruction that describes the task:
### Input:
Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
### Response:
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
if file_entry.link:
# Determine the timezone based on the file path.
_, _, time_zone = file_entry.link.partition('zoneinfo/')
else:
# Determine the timezone based on the timezone information file.
file_object = file_entry.GetFileObject()
time_zone = None
try:
time_zone_file = tz.tzfile(file_object)
date_time = datetime.datetime(2017, 1, 1)
time_zone = time_zone_file.tzname(date_time)
except ValueError:
# TODO: add and store preprocessing errors.
logger.error('Unable to read time zone information file.')
finally:
file_object.close()
# TODO: check if time zone is set in knowledge base.
if time_zone:
try:
knowledge_base.SetTimeZone(time_zone)
except ValueError:
# TODO: add and store preprocessing errors.
logger.error('Unable to set time zone in knowledge base.') |
def create_function_f_c(self):
"""condition function"""
return ca.Function(
'f_c',
[self.t, self.x, self.y, self.m, self.p, self.ng, self.nu],
[self.f_c],
['t', 'x', 'y', 'm', 'p', 'ng', 'nu'], ['c'], self.func_opt) | condition function | Below is the instruction that describes the task:
### Input:
condition function
### Response:
def create_function_f_c(self):
"""condition function"""
return ca.Function(
'f_c',
[self.t, self.x, self.y, self.m, self.p, self.ng, self.nu],
[self.f_c],
['t', 'x', 'y', 'm', 'p', 'ng', 'nu'], ['c'], self.func_opt) |
def rgb_to_hsv(self,RGB):
"linear rgb to hsv"
gammaRGB = self._gamma_rgb(RGB)
return self._ABC_to_DEF_by_fn(gammaRGB,rgb_to_hsv) | linear rgb to hsv | Below is the instruction that describes the task:
### Input:
linear rgb to hsv
### Response:
def rgb_to_hsv(self,RGB):
"linear rgb to hsv"
gammaRGB = self._gamma_rgb(RGB)
return self._ABC_to_DEF_by_fn(gammaRGB,rgb_to_hsv) |
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True | Private method to set auto_watering program. | Below is the instruction that describes the task:
### Input:
Private method to set auto_watering program.
### Response:
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True |
def gui():
"""remove libraries by GUI."""
sel = psidialogs.multi_choice(libraries(),
'select libraries to remove from %s!' % libraries_dir(),
title='remove boards')
print('%s selected' % sel)
if sel:
if psidialogs.ask_yes_no('Do you really want to remove selected libraries?\n' + '\n'.join(sel)):
for x in sel:
remove_lib(x)
print('%s was removed' % x) | remove libraries by GUI. | Below is the instruction that describes the task:
### Input:
remove libraries by GUI.
### Response:
def gui():
"""remove libraries by GUI."""
sel = psidialogs.multi_choice(libraries(),
'select libraries to remove from %s!' % libraries_dir(),
title='remove boards')
print('%s selected' % sel)
if sel:
if psidialogs.ask_yes_no('Do you really want to remove selected libraries?\n' + '\n'.join(sel)):
for x in sel:
remove_lib(x)
print('%s was removed' % x) |
def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine())
if native_system == self.system and native_machine == self.machine:
return ''
platform = {
'linux': 'linux32',
'android-api-16': 'android-arm',
'android-aarch64': 'android-arm64',
}.get(self.gecko_platform, self.gecko_platform)
return platform + '-' | Generate platform prefix for cross-platform downloads. | Below is the instruction that describes the task:
### Input:
Generate platform prefix for cross-platform downloads.
### Response:
def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine())
if native_system == self.system and native_machine == self.machine:
return ''
platform = {
'linux': 'linux32',
'android-api-16': 'android-arm',
'android-aarch64': 'android-arm64',
}.get(self.gecko_platform, self.gecko_platform)
return platform + '-' |
def set_sim_params(self, nparams, attr_params):
"""Store parameters in `params` in `h5file.root.parameters`.
`nparams` (dict)
A dict as returned by `get_params()` in `ParticlesSimulation()`
The format is:
keys:
used as parameter name
values: (2-elements tuple)
first element is the parameter value
second element is a string used as "title" (description)
`attr_params` (dict)
A dict whose items are stored as attributes in '/parameters'
"""
for name, value in nparams.items():
val = value[0] if value[0] is not None else 'none'
self.h5file.create_array('/parameters', name, obj=val,
title=value[1])
for name, value in attr_params.items():
self.h5file.set_node_attr('/parameters', name, value) | Store parameters in `params` in `h5file.root.parameters`.
`nparams` (dict)
A dict as returned by `get_params()` in `ParticlesSimulation()`
The format is:
keys:
used as parameter name
values: (2-elements tuple)
first element is the parameter value
second element is a string used as "title" (description)
`attr_params` (dict)
A dict whose items are stored as attributes in '/parameters' | Below is the instruction that describes the task:
### Input:
Store parameters in `params` in `h5file.root.parameters`.
`nparams` (dict)
A dict as returned by `get_params()` in `ParticlesSimulation()`
The format is:
keys:
used as parameter name
values: (2-elements tuple)
first element is the parameter value
second element is a string used as "title" (description)
`attr_params` (dict)
A dict whose items are stored as attributes in '/parameters'
### Response:
def set_sim_params(self, nparams, attr_params):
"""Store parameters in `params` in `h5file.root.parameters`.
`nparams` (dict)
A dict as returned by `get_params()` in `ParticlesSimulation()`
The format is:
keys:
used as parameter name
values: (2-elements tuple)
first element is the parameter value
second element is a string used as "title" (description)
`attr_params` (dict)
A dict whose items are stored as attributes in '/parameters'
"""
for name, value in nparams.items():
val = value[0] if value[0] is not None else 'none'
self.h5file.create_array('/parameters', name, obj=val,
title=value[1])
for name, value in attr_params.items():
self.h5file.set_node_attr('/parameters', name, value) |
def assignment(self):
"""
assignment ::= relation_name assign expression |
relation_name param_start attribute_list param_stop
assign expression
"""
lhs = Group(self.relation_name +
Optional(self.parenthesize(self.attribute_list)))
return Group(lhs + Keyword(self.syntax.assign_op) + self.expression) | assignment ::= relation_name assign expression |
relation_name param_start attribute_list param_stop
assign expression | Below is the instruction that describes the task:
### Input:
assignment ::= relation_name assign expression |
relation_name param_start attribute_list param_stop
assign expression
### Response:
def assignment(self):
"""
assignment ::= relation_name assign expression |
relation_name param_start attribute_list param_stop
assign expression
"""
lhs = Group(self.relation_name +
Optional(self.parenthesize(self.attribute_list)))
return Group(lhs + Keyword(self.syntax.assign_op) + self.expression) |
def is_rfc1918(ip):
"""Checks to see if an IP address is used for local communications within
a private network as specified by RFC 1918
"""
if ip_between(ip, "10.0.0.0", "10.255.255.255"):
return True
elif ip_between(ip, "172.16.0.0", "172.31.255.255"):
return True
elif ip_between(ip, "192.168.0.0", "192.168.255.255"):
return True
else:
return False | Checks to see if an IP address is used for local communications within
a private network as specified by RFC 1918 | Below is the the instruction that describes the task:
### Input:
Checks to see if an IP address is used for local communications within
a private network as specified by RFC 1918
### Response:
def is_rfc1918(ip):
"""Checks to see if an IP address is used for local communications within
a private network as specified by RFC 1918
"""
if ip_between(ip, "10.0.0.0", "10.255.255.255"):
return True
elif ip_between(ip, "172.16.0.0", "172.31.255.255"):
return True
elif ip_between(ip, "192.168.0.0", "192.168.255.255"):
return True
else:
return False |
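For illustration, a self-contained version of the same check; `ip_between` is not shown in this entry, so the helper below is an assumed implementation built on the standard `ipaddress` module.
import ipaddress

def ip_between(ip, start, end):
    # assumed helper: inclusive range test on dotted-quad addresses
    return ipaddress.ip_address(start) <= ipaddress.ip_address(ip) <= ipaddress.ip_address(end)

def is_rfc1918(ip):
    return (ip_between(ip, "10.0.0.0", "10.255.255.255")
            or ip_between(ip, "172.16.0.0", "172.31.255.255")
            or ip_between(ip, "192.168.0.0", "192.168.255.255"))

print(is_rfc1918("192.168.1.10"))  # True
print(is_rfc1918("8.8.8.8"))       # False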
async def destroy_unit(self, *unit_names):
"""Destroy units by name.
"""
connection = self.connection()
app_facade = client.ApplicationFacade.from_connection(connection)
log.debug(
'Destroying unit%s %s',
            's' if len(unit_names) > 1 else '',
' '.join(unit_names))
return await app_facade.DestroyUnits(list(unit_names)) | Destroy units by name. | Below is the the instruction that describes the task:
### Input:
Destroy units by name.
### Response:
async def destroy_unit(self, *unit_names):
"""Destroy units by name.
"""
connection = self.connection()
app_facade = client.ApplicationFacade.from_connection(connection)
log.debug(
'Destroying unit%s %s',
            's' if len(unit_names) > 1 else '',
' '.join(unit_names))
return await app_facade.DestroyUnits(list(unit_names)) |
def _create_file(self, filename):
"""Ensure a new file is created and opened for writing."""
file_descriptor = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
return os.fdopen(file_descriptor, 'w') | Ensure a new file is created and opened for writing. | Below is the the instruction that describes the task:
### Input:
Ensure a new file is created and opened for writing.
### Response:
def _create_file(self, filename):
"""Ensure a new file is created and opened for writing."""
file_descriptor = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
return os.fdopen(file_descriptor, 'w') |
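A quick demonstration of why os.O_EXCL matters here: the second attempt to create the same path raises instead of silently reusing the file. The path is a throwaway temporary file.
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "report.txt")

fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
with os.fdopen(fd, 'w') as f:
    f.write("first writer wins\n")

try:
    os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
except FileExistsError:
    print("second exclusive create refused, as intended")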
def stop_all_children(self):
'''Kill all workers.'''
# There's an unfortunate race condition if we try to log this
# case: we can't depend on the logging child actually receiving
# the log message before we kill it off. C'est la vie...
self.stop_log_child()
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
os.waitpid(pid, 0)
            except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ECHILD:
# No such process
pass
else:
raise | Kill all workers. | Below is the the instruction that describes the task:
### Input:
Kill all workers.
### Response:
def stop_all_children(self):
'''Kill all workers.'''
# There's an unfortunate race condition if we try to log this
# case: we can't depend on the logging child actually receiving
# the log message before we kill it off. C'est la vie...
self.stop_log_child()
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
os.waitpid(pid, 0)
            except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ECHILD:
# No such process
pass
else:
raise |
def plot_hpars(HDD, hpars, sym):
"""
function to plot hysteresis parameters
deprecated (used in hysteresis_magic)
"""
plt.figure(num=HDD['hyst'])
X, Y = [], []
X.append(0)
Y.append(old_div(float(hpars['hysteresis_mr_moment']), float(
hpars['hysteresis_ms_moment'])))
X.append(float(hpars['hysteresis_bc']))
Y.append(0)
plt.plot(X, Y, sym)
bounds = plt.axis()
n4 = 'Ms: ' + '%8.2e' % (float(hpars['hysteresis_ms_moment'])) + ' Am^2'
plt.text(bounds[1] - .9 * bounds[1], -.9, n4)
n1 = 'Mr: ' + '%8.2e' % (float(hpars['hysteresis_mr_moment'])) + ' Am^2'
plt.text(bounds[1] - .9 * bounds[1], -.7, n1)
n2 = 'Bc: ' + '%8.2e' % (float(hpars['hysteresis_bc'])) + ' T'
plt.text(bounds[1] - .9 * bounds[1], -.5, n2)
if 'hysteresis_xhf' in list(hpars.keys()):
n3 = r'Xhf: ' + '%8.2e' % (float(hpars['hysteresis_xhf'])) + ' m^3'
plt.text(bounds[1] - .9 * bounds[1], -.3, n3)
plt.figure(num=HDD['deltaM'])
X, Y, Bcr = [], [], ""
if 'hysteresis_bcr' in list(hpars.keys()):
X.append(float(hpars['hysteresis_bcr']))
Y.append(0)
Bcr = float(hpars['hysteresis_bcr'])
plt.plot(X, Y, sym)
bounds = plt.axis()
if Bcr != "":
n1 = 'Bcr: ' + '%8.2e' % (Bcr) + ' T'
plt.text(bounds[1] - .5 * bounds[1], .9 * bounds[3], n1) | function to plot hysteresis parameters
deprecated (used in hysteresis_magic) | Below is the the instruction that describes the task:
### Input:
function to plot hysteresis parameters
deprecated (used in hysteresis_magic)
### Response:
def plot_hpars(HDD, hpars, sym):
"""
function to plot hysteresis parameters
deprecated (used in hysteresis_magic)
"""
plt.figure(num=HDD['hyst'])
X, Y = [], []
X.append(0)
Y.append(old_div(float(hpars['hysteresis_mr_moment']), float(
hpars['hysteresis_ms_moment'])))
X.append(float(hpars['hysteresis_bc']))
Y.append(0)
plt.plot(X, Y, sym)
bounds = plt.axis()
n4 = 'Ms: ' + '%8.2e' % (float(hpars['hysteresis_ms_moment'])) + ' Am^2'
plt.text(bounds[1] - .9 * bounds[1], -.9, n4)
n1 = 'Mr: ' + '%8.2e' % (float(hpars['hysteresis_mr_moment'])) + ' Am^2'
plt.text(bounds[1] - .9 * bounds[1], -.7, n1)
n2 = 'Bc: ' + '%8.2e' % (float(hpars['hysteresis_bc'])) + ' T'
plt.text(bounds[1] - .9 * bounds[1], -.5, n2)
if 'hysteresis_xhf' in list(hpars.keys()):
n3 = r'Xhf: ' + '%8.2e' % (float(hpars['hysteresis_xhf'])) + ' m^3'
plt.text(bounds[1] - .9 * bounds[1], -.3, n3)
plt.figure(num=HDD['deltaM'])
X, Y, Bcr = [], [], ""
if 'hysteresis_bcr' in list(hpars.keys()):
X.append(float(hpars['hysteresis_bcr']))
Y.append(0)
Bcr = float(hpars['hysteresis_bcr'])
plt.plot(X, Y, sym)
bounds = plt.axis()
if Bcr != "":
n1 = 'Bcr: ' + '%8.2e' % (Bcr) + ' T'
plt.text(bounds[1] - .5 * bounds[1], .9 * bounds[3], n1) |
def tables(self):
"""
:return: all tables stored in this database
"""
cursor = self.connection.cursor()
cursor.execute("show tables in %s" % self.db)
self._tables = [t.Table(r[0], con=self.connection, db=self.db) for r in cursor.fetchall()]
return self._tables | :return: all tables stored in this database | Below is the the instruction that describes the task:
### Input:
:return: all tables stored in this database
### Response:
def tables(self):
"""
:return: all tables stored in this database
"""
cursor = self.connection.cursor()
cursor.execute("show tables in %s" % self.db)
self._tables = [t.Table(r[0], con=self.connection, db=self.db) for r in cursor.fetchall()]
return self._tables |
def remove_and_append(self, index):
"""Remove previous entrances of a tab, and add it as the latest."""
while index in self:
self.remove(index)
        self.append(index) | Remove previous occurrences of a tab, and add it as the latest. | Below is the the instruction that describes the task:
### Input:
Remove previous occurrences of a tab, and add it as the latest.
### Response:
def remove_and_append(self, index):
"""Remove previous entrances of a tab, and add it as the latest."""
while index in self:
self.remove(index)
self.append(index) |
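A small self-contained sketch of the most-recently-used behaviour this gives when the method lives on a list subclass; the class name is made up for the demo.
class TabHistory(list):
    def remove_and_append(self, index):
        # drop every earlier entry for this tab, then mark it as most recent
        while index in self:
            self.remove(index)
        self.append(index)

history = TabHistory([1, 2, 3])
history.remove_and_append(2)
print(history)  # [1, 3, 2] -> tab 2 is now the latest entry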
def handle_error(self, request: Any, status: int = 500, exc: Any = None, message: Optional[str] = None) -> web.Response:
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection."""
if self.transport is None:
# client has been disconnected during writing.
if self._access_log:
request_ip = RequestHandler.get_request_ip(request, None)
version_string = None
if isinstance(request.version, HttpVersion):
version_string = 'HTTP/{}.{}'.format(request.version.major, request.version.minor)
logging.getLogger('transport.http').info('[{}] [{}] {} {} "{} {}{}{}" - {} "{}" -'.format(
RequestHandler.colorize_status('http', 499),
RequestHandler.colorize_status(499),
request_ip or '',
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.method,
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
' {}'.format(version_string) if version_string else '',
request.content_length if request.content_length is not None else '-',
request.headers.get('User-Agent', '').replace('"', '')
))
headers = {}
headers[hdrs.CONTENT_TYPE] = 'text/plain; charset=utf-8'
msg = '' if status == 500 or not message else message
headers[hdrs.CONTENT_LENGTH] = str(len(msg))
headers[hdrs.SERVER] = self._server_header or ''
resp = web.Response(status=status, # type: ignore
text=msg,
headers=headers) # type: web.Response
resp.force_close() # type: ignore
# some data already got sent, connection is broken
if request.writer.output_size > 0 or self.transport is None:
self.force_close() # type: ignore
elif self.transport is not None:
request_ip = RequestHandler.get_request_ip(request, None)
if not request_ip:
peername = request.transport.get_extra_info('peername')
if peername:
request_ip, _ = peername
if self._access_log:
logging.getLogger('transport.http').info('[{}] [{}] {} {} "INVALID" {} - "" -'.format(
RequestHandler.colorize_status('http', status),
RequestHandler.colorize_status(status),
request_ip or '',
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
len(msg)
))
return resp | Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection. | Below is the the instruction that describes the task:
### Input:
Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection.
### Response:
def handle_error(self, request: Any, status: int = 500, exc: Any = None, message: Optional[str] = None) -> web.Response:
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection."""
if self.transport is None:
# client has been disconnected during writing.
if self._access_log:
request_ip = RequestHandler.get_request_ip(request, None)
version_string = None
if isinstance(request.version, HttpVersion):
version_string = 'HTTP/{}.{}'.format(request.version.major, request.version.minor)
logging.getLogger('transport.http').info('[{}] [{}] {} {} "{} {}{}{}" - {} "{}" -'.format(
RequestHandler.colorize_status('http', 499),
RequestHandler.colorize_status(499),
request_ip or '',
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
request.method,
request.path,
'?{}'.format(request.query_string) if request.query_string else '',
' {}'.format(version_string) if version_string else '',
request.content_length if request.content_length is not None else '-',
request.headers.get('User-Agent', '').replace('"', '')
))
headers = {}
headers[hdrs.CONTENT_TYPE] = 'text/plain; charset=utf-8'
msg = '' if status == 500 or not message else message
headers[hdrs.CONTENT_LENGTH] = str(len(msg))
headers[hdrs.SERVER] = self._server_header or ''
resp = web.Response(status=status, # type: ignore
text=msg,
headers=headers) # type: web.Response
resp.force_close() # type: ignore
# some data already got sent, connection is broken
if request.writer.output_size > 0 or self.transport is None:
self.force_close() # type: ignore
elif self.transport is not None:
request_ip = RequestHandler.get_request_ip(request, None)
if not request_ip:
peername = request.transport.get_extra_info('peername')
if peername:
request_ip, _ = peername
if self._access_log:
logging.getLogger('transport.http').info('[{}] [{}] {} {} "INVALID" {} - "" -'.format(
RequestHandler.colorize_status('http', status),
RequestHandler.colorize_status(status),
request_ip or '',
'"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-',
len(msg)
))
return resp |
def has_attr(cls, attr_name):
"""Check to see if an attribute is defined for the model."""
if attr_name in cls.attrs:
return True
if isinstance(cls.primary_key_name, str) and cls.primary_key_name == attr_name:
return True
if isinstance(cls.primary_key_name, tuple) and attr_name in cls.primary_key_name:
return True
if cls.timestamps is not None and attr_name in cls.timestamps:
return True
return False | Check to see if an attribute is defined for the model. | Below is the the instruction that describes the task:
### Input:
Check to see if an attribute is defined for the model.
### Response:
def has_attr(cls, attr_name):
"""Check to see if an attribute is defined for the model."""
if attr_name in cls.attrs:
return True
if isinstance(cls.primary_key_name, str) and cls.primary_key_name == attr_name:
return True
if isinstance(cls.primary_key_name, tuple) and attr_name in cls.primary_key_name:
return True
if cls.timestamps is not None and attr_name in cls.timestamps:
return True
return False |
def build_dict(img_iterator):
"""
Build a dict from files from iterator.
{'absolute_filename': {'EXIF field': 'exif tag value'}}
Parse DateTime from filename in the same loop, added as 'TIME'.
"""
files_with_tags = dict()
for f in img_iterator:
with open(str(f.abspath()), 'rb') as jpg:
tags = exifread.process_file(jpg)
            # Don't waste space on thumbnails
try:
del tags['JPEGThumbnail']
except KeyError:
pass
tags['TIME'] = get_time(str(f.abspath()), tags)
files_with_tags[str(f.abspath())] = tags
return files_with_tags | Build a dict from files from iterator.
{'absolute_filename': {'EXIF field': 'exif tag value'}}
Parse DateTime from filename in the same loop, added as 'TIME'. | Below is the the instruction that describes the task:
### Input:
Build a dict from files from iterator.
{'absolute_filename': {'EXIF field': 'exif tag value'}}
Parse DateTime from filename in the same loop, added as 'TIME'.
### Response:
def build_dict(img_iterator):
"""
Build a dict from files from iterator.
{'absolute_filename': {'EXIF field': 'exif tag value'}}
Parse DateTime from filename in the same loop, added as 'TIME'.
"""
files_with_tags = dict()
for f in img_iterator:
with open(str(f.abspath()), 'rb') as jpg:
tags = exifread.process_file(jpg)
            # Don't waste space on thumbnails
try:
del tags['JPEGThumbnail']
except KeyError:
pass
tags['TIME'] = get_time(str(f.abspath()), tags)
files_with_tags[str(f.abspath())] = tags
return files_with_tags |
def has_comment(src):
"""Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
Boolean: True if source has a comment.
"""
readline = StringIO(src).readline
toktypes = set()
try:
for t in tokenize.generate_tokens(readline):
toktypes.add(t[0])
except tokenize.TokenError:
pass
return(tokenize.COMMENT in toktypes) | Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
Boolean: True if source has a comment. | Below is the the instruction that describes the task:
### Input:
Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
Boolean: True if source has a comment.
### Response:
def has_comment(src):
"""Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
Boolean: True if source has a comment.
"""
readline = StringIO(src).readline
toktypes = set()
try:
for t in tokenize.generate_tokens(readline):
toktypes.add(t[0])
except tokenize.TokenError:
pass
return(tokenize.COMMENT in toktypes) |
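For illustration, the same tokenize-based test exercised on two inputs; the function is restated so the snippet runs on its own.
import tokenize
from io import StringIO

def has_comment(src):
    toktypes = set()
    try:
        for tok in tokenize.generate_tokens(StringIO(src).readline):
            toktypes.add(tok[0])
    except tokenize.TokenError:
        pass
    return tokenize.COMMENT in toktypes

print(has_comment("x = 1  # set x"))          # True
print(has_comment("y = '# not a comment'"))   # False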
def get(self):
"""API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided".
"""
parser = reqparse.RequestParser()
parser.add_argument('transaction_id', type=str, required=True)
args = parser.parse_args(strict=True)
tx_id = args['transaction_id']
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
blocks = bigchain.get_block_containing_tx(tx_id)
return blocks | API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided". | Below is the the instruction that describes the task:
### Input:
API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided".
### Response:
def get(self):
"""API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided".
"""
parser = reqparse.RequestParser()
parser.add_argument('transaction_id', type=str, required=True)
args = parser.parse_args(strict=True)
tx_id = args['transaction_id']
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
blocks = bigchain.get_block_containing_tx(tx_id)
return blocks |
def infer(cls, nij, Ti, root_state, fixed_pi=None, pc=5.0, gap_limit=0.01, **kwargs):
"""
Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. the modified equation is
:math:`n_{ij} + pc = pi_i W_{ij} (T_j+pc+root\_state)`
Parameters
----------
nij : nxn matrix
The number of times a change in character state is observed
between state j and i
Ti :n vector
The time spent in each character state
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Key word arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
"""
from scipy import linalg as LA
gtr = cls(**kwargs)
gtr.logger("GTR: model inference ",1)
dp = 1e-5
Nit = 40
pc_mat = pc*np.ones_like(nij)
np.fill_diagonal(pc_mat, 0.0)
count = 0
pi_old = np.zeros_like(Ti)
if fixed_pi is None:
pi = np.ones_like(Ti)
else:
pi = np.copy(fixed_pi)
pi/=pi.sum()
W_ij = np.ones_like(nij)
mu = nij.sum()/Ti.sum()
# if pi is fixed, this will immediately converge
while LA.norm(pi_old-pi) > dp and count < Nit:
gtr.logger(' '.join(map(str, ['GTR inference iteration',count,'change:',LA.norm(pi_old-pi)])), 3)
count += 1
pi_old = np.copy(pi)
W_ij = (nij+nij.T+2*pc_mat)/mu/(np.outer(pi,Ti) + np.outer(Ti,pi)
+ ttconf.TINY_NUMBER + 2*pc_mat)
np.fill_diagonal(W_ij, 0)
scale_factor = np.einsum('i,ij,j',pi,W_ij,pi)
W_ij = W_ij/scale_factor
if fixed_pi is None:
pi = (np.sum(nij+pc_mat,axis=1)+root_state)/(ttconf.TINY_NUMBER + mu*np.dot(W_ij,Ti)+root_state.sum()+np.sum(pc_mat, axis=1))
pi /= pi.sum()
mu = nij.sum()/(ttconf.TINY_NUMBER + np.sum(pi * (W_ij.dot(Ti))))
if count >= Nit:
gtr.logger('WARNING: maximum number of iterations has been reached in GTR inference',3, warn=True)
if LA.norm(pi_old-pi) > dp:
gtr.logger('the iterative scheme has not converged',3,warn=True)
elif np.abs(1-np.max(pi.sum(axis=0))) > dp:
gtr.logger('the iterative scheme has converged, but proper normalization was not reached',3,warn=True)
if gtr.gap_index is not None:
if pi[gtr.gap_index]<gap_limit:
gtr.logger('The model allows for gaps which are estimated to occur at a low fraction of %1.3e'%pi[gtr.gap_index]+
                       '\n\t\tthis can potentially result in artifacts.'+
'\n\t\tgap fraction will be set to %1.4f'%gap_limit,2,warn=True)
pi[gtr.gap_index] = gap_limit
pi /= pi.sum()
gtr.assign_rates(mu=mu, W=W_ij, pi=pi)
return gtr | Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. the modified equation is
:math:`n_{ij} + pc = pi_i W_{ij} (T_j+pc+root\_state)`
Parameters
----------
nij : nxn matrix
The number of times a change in character state is observed
between state j and i
Ti :n vector
The time spent in each character state
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Key word arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default. | Below is the the instruction that describes the task:
### Input:
Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. the modified equation is
:math:`n_{ij} + pc = pi_i W_{ij} (T_j+pc+root\_state)`
Parameters
----------
nij : nxn matrix
The number of times a change in character state is observed
between state j and i
Ti :n vector
The time spent in each character state
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Key word arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
### Response:
def infer(cls, nij, Ti, root_state, fixed_pi=None, pc=5.0, gap_limit=0.01, **kwargs):
"""
Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. the modified equation is
:math:`n_{ij} + pc = pi_i W_{ij} (T_j+pc+root\_state)`
Parameters
----------
nij : nxn matrix
The number of times a change in character state is observed
between state j and i
Ti :n vector
The time spent in each character state
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Key word arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
"""
from scipy import linalg as LA
gtr = cls(**kwargs)
gtr.logger("GTR: model inference ",1)
dp = 1e-5
Nit = 40
pc_mat = pc*np.ones_like(nij)
np.fill_diagonal(pc_mat, 0.0)
count = 0
pi_old = np.zeros_like(Ti)
if fixed_pi is None:
pi = np.ones_like(Ti)
else:
pi = np.copy(fixed_pi)
pi/=pi.sum()
W_ij = np.ones_like(nij)
mu = nij.sum()/Ti.sum()
# if pi is fixed, this will immediately converge
while LA.norm(pi_old-pi) > dp and count < Nit:
gtr.logger(' '.join(map(str, ['GTR inference iteration',count,'change:',LA.norm(pi_old-pi)])), 3)
count += 1
pi_old = np.copy(pi)
W_ij = (nij+nij.T+2*pc_mat)/mu/(np.outer(pi,Ti) + np.outer(Ti,pi)
+ ttconf.TINY_NUMBER + 2*pc_mat)
np.fill_diagonal(W_ij, 0)
scale_factor = np.einsum('i,ij,j',pi,W_ij,pi)
W_ij = W_ij/scale_factor
if fixed_pi is None:
pi = (np.sum(nij+pc_mat,axis=1)+root_state)/(ttconf.TINY_NUMBER + mu*np.dot(W_ij,Ti)+root_state.sum()+np.sum(pc_mat, axis=1))
pi /= pi.sum()
mu = nij.sum()/(ttconf.TINY_NUMBER + np.sum(pi * (W_ij.dot(Ti))))
if count >= Nit:
gtr.logger('WARNING: maximum number of iterations has been reached in GTR inference',3, warn=True)
if LA.norm(pi_old-pi) > dp:
gtr.logger('the iterative scheme has not converged',3,warn=True)
elif np.abs(1-np.max(pi.sum(axis=0))) > dp:
gtr.logger('the iterative scheme has converged, but proper normalization was not reached',3,warn=True)
if gtr.gap_index is not None:
if pi[gtr.gap_index]<gap_limit:
gtr.logger('The model allows for gaps which are estimated to occur at a low fraction of %1.3e'%pi[gtr.gap_index]+
                       '\n\t\tthis can potentially result in artifacts.'+
'\n\t\tgap fraction will be set to %1.4f'%gap_limit,2,warn=True)
pi[gtr.gap_index] = gap_limit
pi /= pi.sum()
gtr.assign_rates(mu=mu, W=W_ij, pi=pi)
return gtr |
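To make the fixed-point update concrete, here is a stripped-down NumPy sketch of the same iteration on a synthetic 4-state count matrix. It leaves out the logging, the gap handling and the GTR class itself, so read it as an illustration of the update equations, not as the library routine.
import numpy as np

def infer_rates(nij, Ti, root_state, pc=5.0, n_iter=40, dp=1e-5, tiny=1e-12):
    pc_mat = pc * np.ones_like(nij)
    np.fill_diagonal(pc_mat, 0.0)
    pi = np.ones_like(Ti) / len(Ti)
    mu = nij.sum() / Ti.sum()
    for _ in range(n_iter):
        pi_old = pi.copy()
        # substitution attempt matrix from symmetrised counts and current pi, mu
        W = (nij + nij.T + 2 * pc_mat) / mu / (np.outer(pi, Ti) + np.outer(Ti, pi) + tiny + 2 * pc_mat)
        np.fill_diagonal(W, 0)
        W /= np.einsum('i,ij,j', pi, W, pi)
        # equilibrium frequencies from counts, pseudocounts and the root composition
        pi = (np.sum(nij + pc_mat, axis=1) + root_state) / (tiny + mu * W.dot(Ti) + root_state.sum() + pc_mat.sum(axis=1))
        pi /= pi.sum()
        mu = nij.sum() / (tiny + np.sum(pi * W.dot(Ti)))
        if np.linalg.norm(pi_old - pi) < dp:
            break
    return mu, W, pi

rng = np.random.default_rng(0)
nij = rng.integers(0, 50, size=(4, 4)).astype(float)    # synthetic transition counts
Ti = rng.uniform(10, 100, size=4)                        # synthetic time spent in each state
root_state = rng.integers(0, 10, size=4).astype(float)   # synthetic root composition
mu, W, pi = infer_rates(nij, Ti, root_state)
print(np.round(pi, 3), round(float(mu), 4))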
def get_region_for_chip(x, y, level=3):
"""Get the region word for the given chip co-ordinates.
Parameters
----------
x : int
x co-ordinate
y : int
y co-ordinate
level : int
Level of region to build. 0 is the most coarse and 3 is the finest.
When 3 is used the specified region will ONLY select the given chip,
for other regions surrounding chips will also be selected.
Returns
-------
int
A 32-bit value representing the co-ordinates of the chunk of SpiNNaker
chips that should be selected and the blocks within this chunk that are
selected. As long as bits (31:16) are the same these values may be
OR-ed together to increase the number of sub-blocks selected.
"""
shift = 6 - 2*level
bit = ((x >> shift) & 3) + 4*((y >> shift) & 3) # bit in bits 15:0 to set
mask = 0xffff ^ ((4 << shift) - 1) # in {0xfffc, 0xfff0, 0xffc0, 0xff00}
nx = x & mask # The mask guarantees that bits 1:0 will be cleared
ny = y & mask # The mask guarantees that bits 1:0 will be cleared
# sig bits x | sig bits y | 2-bit level | region select bits
region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)
return region | Get the region word for the given chip co-ordinates.
Parameters
----------
x : int
x co-ordinate
y : int
y co-ordinate
level : int
Level of region to build. 0 is the most coarse and 3 is the finest.
When 3 is used the specified region will ONLY select the given chip,
for other regions surrounding chips will also be selected.
Returns
-------
int
A 32-bit value representing the co-ordinates of the chunk of SpiNNaker
chips that should be selected and the blocks within this chunk that are
selected. As long as bits (31:16) are the same these values may be
OR-ed together to increase the number of sub-blocks selected. | Below is the the instruction that describes the task:
### Input:
Get the region word for the given chip co-ordinates.
Parameters
----------
x : int
x co-ordinate
y : int
y co-ordinate
level : int
Level of region to build. 0 is the most coarse and 3 is the finest.
When 3 is used the specified region will ONLY select the given chip,
for other regions surrounding chips will also be selected.
Returns
-------
int
A 32-bit value representing the co-ordinates of the chunk of SpiNNaker
chips that should be selected and the blocks within this chunk that are
selected. As long as bits (31:16) are the same these values may be
OR-ed together to increase the number of sub-blocks selected.
### Response:
def get_region_for_chip(x, y, level=3):
"""Get the region word for the given chip co-ordinates.
Parameters
----------
x : int
x co-ordinate
y : int
y co-ordinate
level : int
Level of region to build. 0 is the most coarse and 3 is the finest.
When 3 is used the specified region will ONLY select the given chip,
for other regions surrounding chips will also be selected.
Returns
-------
int
A 32-bit value representing the co-ordinates of the chunk of SpiNNaker
chips that should be selected and the blocks within this chunk that are
selected. As long as bits (31:16) are the same these values may be
OR-ed together to increase the number of sub-blocks selected.
"""
shift = 6 - 2*level
bit = ((x >> shift) & 3) + 4*((y >> shift) & 3) # bit in bits 15:0 to set
mask = 0xffff ^ ((4 << shift) - 1) # in {0xfffc, 0xfff0, 0xffc0, 0xff00}
nx = x & mask # The mask guarantees that bits 1:0 will be cleared
ny = y & mask # The mask guarantees that bits 1:0 will be cleared
# sig bits x | sig bits y | 2-bit level | region select bits
region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)
return region |
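A worked example of the bit layout, with the function restated so the numbers can be checked directly; the chip coordinates are arbitrary.
def get_region_for_chip(x, y, level=3):
    shift = 6 - 2 * level
    bit = ((x >> shift) & 3) + 4 * ((y >> shift) & 3)
    mask = 0xffff ^ ((4 << shift) - 1)
    nx, ny = x & mask, y & mask
    return (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)

r12 = get_region_for_chip(1, 2)   # level 3: bit 1 + 4*2 = 9 is set
r21 = get_region_for_chip(2, 1)   # level 3: bit 2 + 4*1 = 6 is set
print(hex(r12), hex(r21))         # 0x30200 0x30040
print(hex(r12 | r21))             # 0x30240 -- same block, so the selects can be OR-ed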
def iter_filtered_dir_entry(dir_entries, match_patterns, on_skip):
"""
Filter a list of DirEntryPath instances with the given pattern
:param dir_entries: list of DirEntryPath instances
:param match_patterns: used with Path.match()
e.g.: "__pycache__/*", "*.tmp", "*.cache"
:param on_skip: function that will be called if 'match_patterns' hits.
e.g.:
def on_skip(entry, pattern):
log.error("Skip pattern %r hit: %s" % (pattern, entry.path))
:return: yields None or DirEntryPath instances
"""
def match(dir_entry_path, match_patterns, on_skip):
for match_pattern in match_patterns:
if dir_entry_path.path_instance.match(match_pattern):
on_skip(dir_entry_path, match_pattern)
return True
return False
for entry in dir_entries:
try:
dir_entry_path = DirEntryPath(entry)
except FileNotFoundError as err:
# e.g.: A file was deleted after the first filesystem scan
# Will be obsolete if we use shadow-copy / snapshot function from filesystem
# see: https://github.com/jedie/PyHardLinkBackup/issues/6
log.error("Can't make DirEntryPath() instance: %s" % err)
continue
if match(dir_entry_path, match_patterns, on_skip):
yield None
else:
yield dir_entry_path | Filter a list of DirEntryPath instances with the given pattern
:param dir_entries: list of DirEntryPath instances
:param match_patterns: used with Path.match()
e.g.: "__pycache__/*", "*.tmp", "*.cache"
:param on_skip: function that will be called if 'match_patterns' hits.
e.g.:
def on_skip(entry, pattern):
log.error("Skip pattern %r hit: %s" % (pattern, entry.path))
:return: yields None or DirEntryPath instances | Below is the the instruction that describes the task:
### Input:
Filter a list of DirEntryPath instances with the given pattern
:param dir_entries: list of DirEntryPath instances
:param match_patterns: used with Path.match()
e.g.: "__pycache__/*", "*.tmp", "*.cache"
:param on_skip: function that will be called if 'match_patterns' hits.
e.g.:
def on_skip(entry, pattern):
log.error("Skip pattern %r hit: %s" % (pattern, entry.path))
:return: yields None or DirEntryPath instances
### Response:
def iter_filtered_dir_entry(dir_entries, match_patterns, on_skip):
"""
Filter a list of DirEntryPath instances with the given pattern
:param dir_entries: list of DirEntryPath instances
:param match_patterns: used with Path.match()
e.g.: "__pycache__/*", "*.tmp", "*.cache"
:param on_skip: function that will be called if 'match_patterns' hits.
e.g.:
def on_skip(entry, pattern):
log.error("Skip pattern %r hit: %s" % (pattern, entry.path))
:return: yields None or DirEntryPath instances
"""
def match(dir_entry_path, match_patterns, on_skip):
for match_pattern in match_patterns:
if dir_entry_path.path_instance.match(match_pattern):
on_skip(dir_entry_path, match_pattern)
return True
return False
for entry in dir_entries:
try:
dir_entry_path = DirEntryPath(entry)
except FileNotFoundError as err:
# e.g.: A file was deleted after the first filesystem scan
# Will be obsolete if we use shadow-copy / snapshot function from filesystem
# see: https://github.com/jedie/PyHardLinkBackup/issues/6
log.error("Can't make DirEntryPath() instance: %s" % err)
continue
if match(dir_entry_path, match_patterns, on_skip):
yield None
else:
yield dir_entry_path |
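The skip decision hinges on Path.match, which compares pattern components against the right-hand end of the path. A tiny standalone check of that behaviour, with file names invented for the demo:
from pathlib import Path

match_patterns = ("__pycache__/*", "*.tmp", "*.cache")
for name in ("src/__pycache__/mod.cpython-39.pyc", "notes.tmp", "report.pdf"):
    hit = any(Path(name).match(pattern) for pattern in match_patterns)
    print(name, "-> skipped" if hit else "-> kept")
# src/__pycache__/mod.cpython-39.pyc -> skipped
# notes.tmp -> skipped
# report.pdf -> kept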
def newTextReaderFilename(URI):
"""Create an xmlTextReader structure fed with the resource at
@URI """
ret = libxml2mod.xmlNewTextReaderFilename(URI)
if ret is None:raise treeError('xmlNewTextReaderFilename() failed')
return xmlTextReader(_obj=ret) | Create an xmlTextReader structure fed with the resource at
@URI | Below is the the instruction that describes the task:
### Input:
Create an xmlTextReader structure fed with the resource at
@URI
### Response:
def newTextReaderFilename(URI):
"""Create an xmlTextReader structure fed with the resource at
@URI """
ret = libxml2mod.xmlNewTextReaderFilename(URI)
if ret is None:raise treeError('xmlNewTextReaderFilename() failed')
return xmlTextReader(_obj=ret) |
def verify_words(self):
"""Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
"""
if self.comment:
if find_words(self.comment, self.suspect_words, self.excluded_words):
self.label_suspicious('suspect_word')
if self.source:
for word in self.illegal_sources:
if word in self.source.lower():
self.label_suspicious('suspect_word')
break
if self.imagery_used:
for word in self.illegal_sources:
if word in self.imagery_used.lower():
self.label_suspicious('suspect_word')
break
self.suspicion_reasons = list(set(self.suspicion_reasons)) | Verify the fields source, imagery_used and comment of the changeset
for some suspect words. | Below is the the instruction that describes the task:
### Input:
Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
### Response:
def verify_words(self):
"""Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
"""
if self.comment:
if find_words(self.comment, self.suspect_words, self.excluded_words):
self.label_suspicious('suspect_word')
if self.source:
for word in self.illegal_sources:
if word in self.source.lower():
self.label_suspicious('suspect_word')
break
if self.imagery_used:
for word in self.illegal_sources:
if word in self.imagery_used.lower():
self.label_suspicious('suspect_word')
break
self.suspicion_reasons = list(set(self.suspicion_reasons)) |
def set_data(self, data):
"""
Set a new data buffer.
:Parameters:
`data` : a basestring object
New data buffer.
"""
if not isinstance(data, basestring):
raise ValueError("data must an instance of basestring")
self._data = data
self._current = 0 | Set a new data buffer.
:Parameters:
`data` : a basestring object
New data buffer. | Below is the the instruction that describes the task:
### Input:
Set a new data buffer.
:Parameters:
`data` : a basestring object
New data buffer.
### Response:
def set_data(self, data):
"""
Set a new data buffer.
:Parameters:
`data` : a basestring object
New data buffer.
"""
if not isinstance(data, basestring):
raise ValueError("data must an instance of basestring")
self._data = data
self._current = 0 |
def add(self, layers, above=None, below=None):
""" Add one or more layers to the stack of masking layers.
Args:
layers: A string, NiBabel image, list, or dict. If anything other
than a dict is passed, assigns sequential layer names based on
the current position in stack; if a dict, uses key as the name
and value as the mask image.
"""
def add_named_layer(name, image):
image = self.get_image(image, output='vector')
if above is not None:
image[image < above] = 0.
if below is not None:
image[image > below] = 0.
self.layers[name] = image
self.stack.append(name)
if isinstance(layers, dict):
for (name, image) in layers.items():
add_named_layer(name, image)
else:
if not isinstance(layers, list):
layers = [layers]
for image in layers:
name = 'layer_%d' % len(self.stack)
add_named_layer(name, image)
self.set_mask() | Add one or more layers to the stack of masking layers.
Args:
layers: A string, NiBabel image, list, or dict. If anything other
than a dict is passed, assigns sequential layer names based on
the current position in stack; if a dict, uses key as the name
and value as the mask image. | Below is the the instruction that describes the task:
### Input:
Add one or more layers to the stack of masking layers.
Args:
layers: A string, NiBabel image, list, or dict. If anything other
than a dict is passed, assigns sequential layer names based on
the current position in stack; if a dict, uses key as the name
and value as the mask image.
### Response:
def add(self, layers, above=None, below=None):
""" Add one or more layers to the stack of masking layers.
Args:
layers: A string, NiBabel image, list, or dict. If anything other
than a dict is passed, assigns sequential layer names based on
the current position in stack; if a dict, uses key as the name
and value as the mask image.
"""
def add_named_layer(name, image):
image = self.get_image(image, output='vector')
if above is not None:
image[image < above] = 0.
if below is not None:
image[image > below] = 0.
self.layers[name] = image
self.stack.append(name)
if isinstance(layers, dict):
for (name, image) in layers.items():
add_named_layer(name, image)
else:
if not isinstance(layers, list):
layers = [layers]
for image in layers:
name = 'layer_%d' % len(self.stack)
add_named_layer(name, image)
self.set_mask() |
def expires(self):
"""Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
Raises:
ValueError: For invalid value types.
"""
expiration_time = self._properties.get("expirationTime")
if expiration_time is not None:
# expiration_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(expiration_time)
) | Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
Raises:
ValueError: For invalid value types. | Below is the the instruction that describes the task:
### Input:
Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
Raises:
ValueError: For invalid value types.
### Response:
def expires(self):
"""Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
Raises:
ValueError: For invalid value types.
"""
expiration_time = self._properties.get("expirationTime")
if expiration_time is not None:
# expiration_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(expiration_time)
) |
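The private helper used above simply turns microseconds back into a datetime; an equivalent conversion with only the standard library, on a made-up millisecond value, looks like this.
import datetime

expiration_time = "1735689600000"   # hypothetical "expirationTime" property value, in milliseconds
expires = datetime.datetime.utcfromtimestamp(float(expiration_time) / 1000.0)
print(expires)   # 2025-01-01 00:00:00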
def reset(self):
# pylint: disable=not-context-manager
"""Reset scheduler::
* Remove waiting results
* Clear checks and actions lists
:return: None
"""
logger.info("Scheduling loop reset")
with self.waiting_results.mutex:
self.waiting_results.queue.clear()
self.checks.clear()
self.actions.clear() | Reset scheduler::
* Remove waiting results
* Clear checks and actions lists
:return: None | Below is the the instruction that describes the task:
### Input:
Reset scheduler::
* Remove waiting results
* Clear checks and actions lists
:return: None
### Response:
def reset(self):
# pylint: disable=not-context-manager
"""Reset scheduler::
* Remove waiting results
* Clear checks and actions lists
:return: None
"""
logger.info("Scheduling loop reset")
with self.waiting_results.mutex:
self.waiting_results.queue.clear()
self.checks.clear()
self.actions.clear() |
def fromXml(cls, xml):
"""
Restores an object from XML.
:param xml | <xml.etree.ElementTree.Element>
:return subclass of <XmlObject>
"""
clsname = xml.get('class')
if clsname:
subcls = XmlObject.byName(clsname)
if subcls is None:
inst = MissingXmlObject(clsname)
else:
inst = subcls()
else:
inst = cls()
inst.loadXml(xml)
return inst | Restores an object from XML.
:param xml | <xml.etree.ElementTree.Element>
:return subclass of <XmlObject> | Below is the the instruction that describes the task:
### Input:
Restores an object from XML.
:param xml | <xml.etree.ElementTree.Element>
:return subclass of <XmlObject>
### Response:
def fromXml(cls, xml):
"""
Restores an object from XML.
:param xml | <xml.etree.ElementTree.Element>
:return subclass of <XmlObject>
"""
clsname = xml.get('class')
if clsname:
subcls = XmlObject.byName(clsname)
if subcls is None:
inst = MissingXmlObject(clsname)
else:
inst = subcls()
else:
inst = cls()
inst.loadXml(xml)
return inst |
def document_fromstring(html, guess_charset=True, parser=None):
"""Parse a whole document into a string."""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
    return parser.parse(html, useChardet=guess_charset).getroot() | Parse a whole document from a string. | Below is the the instruction that describes the task:
### Input:
Parse a whole document from a string.
### Response:
def document_fromstring(html, guess_charset=True, parser=None):
"""Parse a whole document into a string."""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
return parser.parse(html, useChardet=guess_charset).getroot() |
def build_segment_list_one(engine, gps_start_time, gps_end_time, ifo, segment_name, version = None, start_pad = 0, end_pad = 0):
"""Builds a list of segments satisfying the given criteria """
seg_result = segmentlist([])
sum_result = segmentlist([])
# Is there any way to get segment and segement summary in one query?
# Maybe some sort of outer join where we keep track of which segment
# summaries we've already seen.
sql = "SELECT segment_summary.start_time, segment_summary.end_time "
sql += "FROM segment_definer, segment_summary "
sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
sql += "AND segment_definer.ifos = '%s' " % ifo
if engine.__class__ == query_engine.LdbdQueryEngine:
sql += "AND segment_summary.segment_def_cdb = segment_definer.creator_db "
sql += "AND segment_definer.name = '%s' " % segment_name
sql += "AND segment_definer.version = %s " % version
sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (gps_start_time, gps_end_time)
rows = engine.query(sql)
for sum_start_time, sum_end_time in rows:
sum_start_time = (sum_start_time < gps_start_time) and gps_start_time or sum_start_time
sum_end_time = (sum_end_time > gps_end_time) and gps_end_time or sum_end_time
sum_result |= segmentlist([segment(sum_start_time, sum_end_time)])
    # We can't use queries parameterized with ? since the ldbd protocol doesn't support it...
sql = "SELECT segment.start_time + %d, segment.end_time + %d " % (start_pad, end_pad)
sql += "FROM segment, segment_definer "
sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "
if engine.__class__ == query_engine.LdbdQueryEngine:
sql += "AND segment.segment_def_cdb = segment_definer.creator_db "
sql += "AND segment_definer.ifos = '%s' " % ifo
sql += "AND segment_definer.name = '%s' " % segment_name
sql += "AND segment_definer.version = %s " % version
sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (gps_start_time, gps_end_time)
rows = engine.query(sql)
for seg_start_time, seg_end_time in rows:
seg_start_time = (seg_start_time < gps_start_time) and gps_start_time or seg_start_time
seg_end_time = (seg_end_time > gps_end_time) and gps_end_time or seg_end_time
seg_result |= segmentlist([segment(seg_start_time, seg_end_time)])
engine.close()
return sum_result, seg_result | Builds a list of segments satisfying the given criteria | Below is the the instruction that describes the task:
### Input:
Builds a list of segments satisfying the given criteria
### Response:
def build_segment_list_one(engine, gps_start_time, gps_end_time, ifo, segment_name, version = None, start_pad = 0, end_pad = 0):
"""Builds a list of segments satisfying the given criteria """
seg_result = segmentlist([])
sum_result = segmentlist([])
    # Is there any way to get segment and segment summary in one query?
# Maybe some sort of outer join where we keep track of which segment
# summaries we've already seen.
sql = "SELECT segment_summary.start_time, segment_summary.end_time "
sql += "FROM segment_definer, segment_summary "
sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
sql += "AND segment_definer.ifos = '%s' " % ifo
if engine.__class__ == query_engine.LdbdQueryEngine:
sql += "AND segment_summary.segment_def_cdb = segment_definer.creator_db "
sql += "AND segment_definer.name = '%s' " % segment_name
sql += "AND segment_definer.version = %s " % version
sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (gps_start_time, gps_end_time)
rows = engine.query(sql)
for sum_start_time, sum_end_time in rows:
sum_start_time = (sum_start_time < gps_start_time) and gps_start_time or sum_start_time
sum_end_time = (sum_end_time > gps_end_time) and gps_end_time or sum_end_time
sum_result |= segmentlist([segment(sum_start_time, sum_end_time)])
    # We can't use queries parameterized with ? since the ldbd protocol doesn't support it...
sql = "SELECT segment.start_time + %d, segment.end_time + %d " % (start_pad, end_pad)
sql += "FROM segment, segment_definer "
sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "
if engine.__class__ == query_engine.LdbdQueryEngine:
sql += "AND segment.segment_def_cdb = segment_definer.creator_db "
sql += "AND segment_definer.ifos = '%s' " % ifo
sql += "AND segment_definer.name = '%s' " % segment_name
sql += "AND segment_definer.version = %s " % version
sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (gps_start_time, gps_end_time)
rows = engine.query(sql)
for seg_start_time, seg_end_time in rows:
seg_start_time = (seg_start_time < gps_start_time) and gps_start_time or seg_start_time
seg_end_time = (seg_end_time > gps_end_time) and gps_end_time or seg_end_time
seg_result |= segmentlist([segment(seg_start_time, seg_end_time)])
engine.close()
return sum_result, seg_result |
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
""" Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
"""
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id.startswith('$For''mat:'):
# source was modified by git archive, try to parse the version from
# the value of git_archive_id
match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
if match:
# archived revision is tagged, use the tag
return gitDescribeToPep440(match.group(1))
# archived revision is not tagged, use the commit date
tstamp = git_archive_id.strip().split()[0]
d = datetime.datetime.utcfromtimestamp(int(tstamp))
return d.strftime('%Y.%m.%d')
return None | Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details. | Below is the the instruction that describes the task:
### Input:
Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
### Response:
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
""" Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
"""
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id.startswith('$For''mat:'):
# source was modified by git archive, try to parse the version from
# the value of git_archive_id
match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
if match:
# archived revision is tagged, use the tag
return gitDescribeToPep440(match.group(1))
# archived revision is not tagged, use the commit date
tstamp = git_archive_id.strip().split()[0]
d = datetime.datetime.utcfromtimestamp(int(tstamp))
return d.strftime('%Y.%m.%d')
return None |
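Two hypothetical expanded archive ids run through the same logic; the gitDescribeToPep440 normalisation step is left out because that helper is not shown in this entry.
import datetime
import re

def version_from_archive_id(git_archive_id):
    if git_archive_id.startswith('$For' 'mat:'):
        return None                       # source was not exported via `git archive`
    match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
    if match:
        return match.group(1)             # the raw tag; the original pipes this through gitDescribeToPep440
    tstamp = git_archive_id.strip().split()[0]
    return datetime.datetime.utcfromtimestamp(int(tstamp)).strftime('%Y.%m.%d')

print(version_from_archive_id('1514764800  (HEAD -> master, tag: v1.2.3)'))  # 1.2.3
print(version_from_archive_id('1514764800  (HEAD -> master)'))               # 2018.01.01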
def get_sitk_image_from_ndarray(data3d):
"""
Prepare SimpleItk Image object and rescale data to unsigned types.
    SimpleITK versions higher than 1.0.0 cannot write signed int16. This function checks
    the SimpleITK version and uses a workaround with Rescale Intercept and Rescale Slope
:param data3d:
:return:
"""
import SimpleITK as sitk
rescale_intercept = None
if sitk.Version.MajorVersion() > 0:
if data3d.dtype == np.int8:
rescale_intercept = -2**7
data3d = (data3d - rescale_intercept).astype(np.uint8)
elif data3d.dtype == np.int16:
# simpleitk is not able to store this. It uses only 11 bites
# rescale_intercept = -2**15
rescale_intercept = -2**10
data3d = (data3d - rescale_intercept).astype(np.uint16)
elif data3d.dtype == np.int32:
rescale_intercept = -2**31
data3d = (data3d - rescale_intercept).astype(np.uint16)
dim = sitk.GetImageFromArray(data3d)
if sitk.Version.MajorVersion() > 0:
if rescale_intercept is not None:
# rescale slope (0028|1053), rescale intercept (0028|1052)
dim.SetMetaData("0028|1052", str(rescale_intercept))
dim.SetMetaData("0028|1053", "1")
return dim | Prepare SimpleItk Image object and rescale data to unsigned types.
    SimpleITK versions higher than 1.0.0 cannot write signed int16. This function checks
    the SimpleITK version and uses a workaround with Rescale Intercept and Rescale Slope
:param data3d:
:return: | Below is the the instruction that describes the task:
### Input:
Prepare SimpleItk Image object and rescale data to unsigned types.
    SimpleITK versions higher than 1.0.0 cannot write signed int16. This function checks
    the SimpleITK version and uses a workaround with Rescale Intercept and Rescale Slope
:param data3d:
:return:
### Response:
def get_sitk_image_from_ndarray(data3d):
"""
Prepare SimpleItk Image object and rescale data to unsigned types.
    SimpleITK versions higher than 1.0.0 cannot write signed int16. This function checks
    the SimpleITK version and uses a workaround with Rescale Intercept and Rescale Slope
:param data3d:
:return:
"""
import SimpleITK as sitk
rescale_intercept = None
if sitk.Version.MajorVersion() > 0:
if data3d.dtype == np.int8:
rescale_intercept = -2**7
data3d = (data3d - rescale_intercept).astype(np.uint8)
elif data3d.dtype == np.int16:
# simpleitk is not able to store this. It uses only 11 bites
# rescale_intercept = -2**15
rescale_intercept = -2**10
data3d = (data3d - rescale_intercept).astype(np.uint16)
elif data3d.dtype == np.int32:
rescale_intercept = -2**31
data3d = (data3d - rescale_intercept).astype(np.uint16)
dim = sitk.GetImageFromArray(data3d)
if sitk.Version.MajorVersion() > 0:
if rescale_intercept is not None:
# rescale slope (0028|1053), rescale intercept (0028|1052)
dim.SetMetaData("0028|1052", str(rescale_intercept))
dim.SetMetaData("0028|1053", "1")
return dim |
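The arithmetic behind the workaround, shown with NumPy alone so it runs without SimpleITK: signed data is shifted into the unsigned range for storage, and a reader recovers the original values by adding the stored intercept back.
import numpy as np

data3d = np.array([[[-1024, 0, 1024]]], dtype=np.int16)      # hypothetical CT-like voxels
rescale_intercept = -2**10                                    # the int16 branch above
stored = (data3d - rescale_intercept).astype(np.uint16)       # what gets written: 0, 1024, 2048
recovered = stored.astype(np.int32) + rescale_intercept       # reader applies slope * stored + intercept
print(stored.tolist())                                        # [[[0, 1024, 2048]]]
print((recovered == data3d).all())                            # True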
def load(cls, path):
"""
Load a SOM from a JSON file saved with this package.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class.
"""
data = json.load(open(path))
weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)
s = cls(data['num_neurons'],
data['data_dimensionality'],
data['params']['lr']['orig'],
neighborhood=data['params']['infl']['orig'],
valfunc=data['valfunc'],
argfunc=data['argfunc'],
lr_lambda=data['params']['lr']['factor'],
nb_lambda=data['params']['nb']['factor'])
s.weights = weights
s.trained = True
return s | Load a SOM from a JSON file saved with this package.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class. | Below is the the instruction that describes the task:
### Input:
Load a SOM from a JSON file saved with this package.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class.
### Response:
def load(cls, path):
"""
Load a SOM from a JSON file saved with this package.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class.
"""
data = json.load(open(path))
weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)
s = cls(data['num_neurons'],
data['data_dimensionality'],
data['params']['lr']['orig'],
neighborhood=data['params']['infl']['orig'],
valfunc=data['valfunc'],
argfunc=data['argfunc'],
lr_lambda=data['params']['lr']['factor'],
nb_lambda=data['params']['nb']['factor'])
s.weights = weights
s.trained = True
return s |
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
'''
Create a snapshot.
volume_id
The ID of the Volume from which to create a snapshot.
description
The optional description of the snapshot.
    CLI Example:
.. code-block:: bash
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
description="My Snapshot Description"
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_snapshot function must be called with -f '
'or --function.'
)
if kwargs is None:
kwargs = {}
volume_id = kwargs.get('volume_id', None)
description = kwargs.get('description', '')
if volume_id is None:
raise SaltCloudSystemExit(
'A volume_id must be specified to create a snapshot.'
)
params = {'Action': 'CreateSnapshot',
'VolumeId': volume_id,
'Description': description}
log.debug(params)
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')[0]
r_data = {}
for d in data:
for k, v in six.iteritems(d):
r_data[k] = v
if 'snapshotId' in r_data:
snapshot_id = r_data['snapshotId']
# Waits till volume is available
if wait_to_finish:
salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
kwargs={'snapshot_id': snapshot_id},
fun_call=call,
argument_being_watched='status',
required_argument_response='completed')
return r_data | Create a snapshot.
volume_id
The ID of the Volume from which to create a snapshot.
description
The optional description of the snapshot.
CLI Exampe:
.. code-block:: bash
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
description="My Snapshot Description" | Below is the the instruction that describes the task:
### Input:
Create a snapshot.
volume_id
The ID of the Volume from which to create a snapshot.
description
The optional description of the snapshot.
CLI Exampe:
.. code-block:: bash
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
description="My Snapshot Description"
### Response:
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
'''
Create a snapshot.
volume_id
The ID of the Volume from which to create a snapshot.
description
The optional description of the snapshot.
    CLI Example:
.. code-block:: bash
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
description="My Snapshot Description"
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_snapshot function must be called with -f '
'or --function.'
)
if kwargs is None:
kwargs = {}
volume_id = kwargs.get('volume_id', None)
description = kwargs.get('description', '')
if volume_id is None:
raise SaltCloudSystemExit(
'A volume_id must be specified to create a snapshot.'
)
params = {'Action': 'CreateSnapshot',
'VolumeId': volume_id,
'Description': description}
log.debug(params)
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')[0]
r_data = {}
for d in data:
for k, v in six.iteritems(d):
r_data[k] = v
if 'snapshotId' in r_data:
snapshot_id = r_data['snapshotId']
# Waits till volume is available
if wait_to_finish:
salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
kwargs={'snapshot_id': snapshot_id},
fun_call=call,
argument_being_watched='status',
required_argument_response='completed')
return r_data |
def GetFilePathsWithDialog(fileTypes=[]):
"""
    Multiple File Select with dialog
    fileTypes: you can choose the file extension
ex) fileTypes=[('Excel Files', '.xlsx')]
"""
root = tkinter.Tk()
root.withdraw()
filepath = filedialog.askopenfilenames(
filetypes=fileTypes, parent=root)
if isinstance(filepath, str):
fileList = filepath.split(" ")
elif isinstance(filepath, tuple):
fileList = list(filepath)
elif isinstance(filepath, list):
fileList = filepath
root.destroy()
print(str(len(fileList)) + " files are selected")
    return fileList | Multiple File Select with dialog
    fileTypes: you can choose the file extension
ex) fileTypes=[('Excel Files', '.xlsx')] | Below is the the instruction that describes the task:
### Input:
Multiple File Select with dialog
    fileTypes: you can choose the file extension
ex) fileTypes=[('Excel Files', '.xlsx')]
### Response:
def GetFilePathsWithDialog(fileTypes=[]):
"""
    Multiple File Select with dialog
    fileTypes: you can choose the file extension
ex) fileTypes=[('Excel Files', '.xlsx')]
"""
root = tkinter.Tk()
root.withdraw()
filepath = filedialog.askopenfilenames(
filetypes=fileTypes, parent=root)
if isinstance(filepath, str):
fileList = filepath.split(" ")
elif isinstance(filepath, tuple):
fileList = list(filepath)
elif isinstance(filepath, list):
fileList = filepath
root.destroy()
print(str(len(fileList)) + " files are selected")
return fileList |
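A short usage sketch, assuming a desktop session where tkinter can open a dialog:
# Hedged sketch: restrict the dialog to Excel workbooks, then print the selection.
paths = GetFilePathsWithDialog(fileTypes=[('Excel Files', '.xlsx')])
for p in paths:
    print(p)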
def _add_bearer_token(self, *args, **kwargs):
"""Add a bearer token to the request uri, body or authorization header.
This is overwritten to change the headers slightly.
"""
s = super(TwitchOAuthClient, self)
uri, headers, body = s._add_bearer_token(*args, **kwargs)
authheader = headers.get('Authorization')
if authheader:
headers['Authorization'] = authheader.replace('Bearer', 'OAuth')
return uri, headers, body | Add a bearer token to the request uri, body or authorization header.
This is overwritten to change the headers slightly. | Below is the the instruction that describes the task:
### Input:
Add a bearer token to the request uri, body or authorization header.
This is overwritten to change the headers slightly.
### Response:
def _add_bearer_token(self, *args, **kwargs):
"""Add a bearer token to the request uri, body or authorization header.
This is overwritten to change the headers slightly.
"""
s = super(TwitchOAuthClient, self)
uri, headers, body = s._add_bearer_token(*args, **kwargs)
authheader = headers.get('Authorization')
if authheader:
headers['Authorization'] = authheader.replace('Bearer', 'OAuth')
return uri, headers, body |
def generate_nonce_timestamp():
""" Generate unique nonce with counter, uuid and rng."""
global count
rng = botan.rng().get(30)
uuid4 = uuid.uuid4().bytes # 16 byte
tmpnonce = (bytes(str(count).encode('utf-8'))) + uuid4 + rng
nonce = tmpnonce[:41] # 41 byte (328 bit)
count += 1
return nonce | Generate unique nonce with counter, uuid and rng. | Below is the the instruction that describes the task:
### Input:
Generate unique nonce with counter, uuid and rng.
### Response:
def generate_nonce_timestamp():
""" Generate unique nonce with counter, uuid and rng."""
global count
rng = botan.rng().get(30)
uuid4 = uuid.uuid4().bytes # 16 byte
tmpnonce = (bytes(str(count).encode('utf-8'))) + uuid4 + rng
nonce = tmpnonce[:41] # 41 byte (328 bit)
count += 1
return nonce |
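A quick sanity-check sketch, assuming the botan binding and the module-level `count` counter are available:
# Hedged sketch: nonces are 41 bytes long and differ between calls.
n1 = generate_nonce_timestamp()
n2 = generate_nonce_timestamp()
assert len(n1) == 41 and n1 != n2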
def devices(self):
"""Get a listing of devices registered to the Google Music account."""
response = self._call(
mc_calls.DeviceManagementInfo
)
registered_devices = response.body.get('data', {}).get('items', [])
return registered_devices | Get a listing of devices registered to the Google Music account. | Below is the the instruction that describes the task:
### Input:
Get a listing of devices registered to the Google Music account.
### Response:
def devices(self):
"""Get a listing of devices registered to the Google Music account."""
response = self._call(
mc_calls.DeviceManagementInfo
)
registered_devices = response.body.get('data', {}).get('items', [])
return registered_devices |
def _assert_version(self, version):
'''
Assert that the grid version is equal to or above the given value.
If no version is set, set the version.
'''
if self.nearest_version < version:
if self._version_given:
raise ValueError(
'Data type requires version %s' \
% version)
else:
self._version = version | Assert that the grid version is equal to or above the given value.
If no version is set, set the version. | Below is the the instruction that describes the task:
### Input:
Assert that the grid version is equal to or above the given value.
If no version is set, set the version.
### Response:
def _assert_version(self, version):
'''
Assert that the grid version is equal to or above the given value.
If no version is set, set the version.
'''
if self.nearest_version < version:
if self._version_given:
raise ValueError(
'Data type requires version %s' \
% version)
else:
self._version = version |
def _B(self, R):
"""Return numpy array from B1 up to and including Bn. (eqn. 6)"""
HNn_R = self._HNn / R
return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1) | Return numpy array from B1 up to and including Bn. (eqn. 6) | Below is the the instruction that describes the task:
### Input:
Return numpy array from B1 up to and including Bn. (eqn. 6)
### Response:
def _B(self, R):
"""Return numpy array from B1 up to and including Bn. (eqn. 6)"""
HNn_R = self._HNn / R
return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1) |
def writeObjectReference(self, obj, output):
"""Tries to write an object reference, adding it to the references
table. Does not write the actual object bytes or set the reference
position. Returns a tuple of whether the object was a new reference
(True if it was, False if it already was in the reference table)
and the new output.
"""
position = self.positionOfObjectReference(obj)
if position is None:
self.writtenReferences[obj] = len(self.writtenReferences)
output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
return (True, output)
else:
output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
return (False, output) | Tries to write an object reference, adding it to the references
table. Does not write the actual object bytes or set the reference
position. Returns a tuple of whether the object was a new reference
(True if it was, False if it already was in the reference table)
and the new output. | Below is the the instruction that describes the task:
### Input:
Tries to write an object reference, adding it to the references
table. Does not write the actual object bytes or set the reference
position. Returns a tuple of whether the object was a new reference
(True if it was, False if it already was in the reference table)
and the new output.
### Response:
def writeObjectReference(self, obj, output):
"""Tries to write an object reference, adding it to the references
table. Does not write the actual object bytes or set the reference
position. Returns a tuple of whether the object was a new reference
(True if it was, False if it already was in the reference table)
and the new output.
"""
position = self.positionOfObjectReference(obj)
if position is None:
self.writtenReferences[obj] = len(self.writtenReferences)
output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
return (True, output)
else:
output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
return (False, output) |
def orchestration_save(self, mode="shallow", custom_params=None):
"""Orchestration Save command
:param mode:
:param custom_params: json with all required action to configure or remove vlans from certain port
:return Serialized OrchestrationSavedArtifact to json
:rtype json
"""
save_params = {'folder_path': '', 'configuration_type': 'running', 'return_artifact': True}
params = dict()
if custom_params:
params = jsonpickle.decode(custom_params)
save_params.update(params.get('custom_params', {}))
save_params['folder_path'] = self.get_path(save_params['folder_path'])
saved_artifact = self.save(**save_params)
saved_artifact_info = OrchestrationSavedArtifactInfo(resource_name=self.resource_config.name,
created_date=datetime.datetime.now(),
restore_rules=self.get_restore_rules(),
saved_artifact=saved_artifact)
save_response = OrchestrationSaveResult(saved_artifacts_info=saved_artifact_info)
self._validate_artifact_info(saved_artifact_info)
return serialize_to_json(save_response) | Orchestration Save command
:param mode:
:param custom_params: json with all required action to configure or remove vlans from certain port
:return Serialized OrchestrationSavedArtifact to json
:rtype json | Below is the the instruction that describes the task:
### Input:
Orchestration Save command
:param mode:
:param custom_params: json with all required action to configure or remove vlans from certain port
:return Serialized OrchestrationSavedArtifact to json
:rtype json
### Response:
def orchestration_save(self, mode="shallow", custom_params=None):
"""Orchestration Save command
:param mode:
:param custom_params: json with all required action to configure or remove vlans from certain port
:return Serialized OrchestrationSavedArtifact to json
:rtype json
"""
save_params = {'folder_path': '', 'configuration_type': 'running', 'return_artifact': True}
params = dict()
if custom_params:
params = jsonpickle.decode(custom_params)
save_params.update(params.get('custom_params', {}))
save_params['folder_path'] = self.get_path(save_params['folder_path'])
saved_artifact = self.save(**save_params)
saved_artifact_info = OrchestrationSavedArtifactInfo(resource_name=self.resource_config.name,
created_date=datetime.datetime.now(),
restore_rules=self.get_restore_rules(),
saved_artifact=saved_artifact)
save_response = OrchestrationSaveResult(saved_artifacts_info=saved_artifact_info)
self._validate_artifact_info(saved_artifact_info)
return serialize_to_json(save_response) |
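A hedged call sketch; `driver` is a hypothetical instance of the class this method belongs to, and the keys shown are the ones the method itself reads:
import jsonpickle  # same library the method uses to decode custom_params
custom = jsonpickle.encode({'custom_params': {'configuration_type': 'startup',
                                              'folder_path': 'backups/'}})
response_json = driver.orchestration_save(mode='shallow', custom_params=custom)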
def _parse_prop(self, dd, row):
"""
:param dd: datadict
:param _row: (tablename, row)
:return:
"""
key = row['name']
if key.startswith('#'):
deprecated = True
else:
deprecated = False
v = dd.get(key)
_value = self._get_value(row)
if not v:
v = dd.setdefault(key, {})
v[_value] = deprecated
else:
if not _value in v:
v[_value] = deprecated | :param dd: datadict
:param _row: (tablename, row)
:return: | Below is the the instruction that describes the task:
### Input:
:param dd: datadict
:param _row: (tablename, row)
:return:
### Response:
def _parse_prop(self, dd, row):
"""
:param dd: datadict
:param _row: (tablename, row)
:return:
"""
key = row['name']
if key.startswith('#'):
deprecated = True
else:
deprecated = False
v = dd.get(key)
_value = self._get_value(row)
if not v:
v = dd.setdefault(key, {})
v[_value] = deprecated
else:
if not _value in v:
v[_value] = deprecated |
def organize_models(self, outdir, force_rerun=False):
"""Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
"""
uniprot_to_swissmodel = defaultdict(list)
for u, models in self.all_models.items():
for m in models:
original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])
file_path = op.join(self.metadata_dir,
u[:2], u[2:4], u[4:], 'swissmodel',
'{}.pdb'.format(original_filename))
if op.exists(file_path):
new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])
shutil.copy(file_path, op.join(outdir, new_filename))
uniprot_to_swissmodel[u].append(new_filename)
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return uniprot_to_swissmodel | Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values | Below is the the instruction that describes the task:
### Input:
Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
### Response:
def organize_models(self, outdir, force_rerun=False):
"""Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
"""
uniprot_to_swissmodel = defaultdict(list)
for u, models in self.all_models.items():
for m in models:
original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])
file_path = op.join(self.metadata_dir,
u[:2], u[2:4], u[4:], 'swissmodel',
'{}.pdb'.format(original_filename))
if op.exists(file_path):
new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])
shutil.copy(file_path, op.join(outdir, new_filename))
uniprot_to_swissmodel[u].append(new_filename)
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return uniprot_to_swissmodel |
def readSTATION0(path, stations):
"""
Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
"""
stalist = []
f = open(path + '/STATION0.HYP', 'r')
for line in f:
if line[1:6].strip() in stations:
station = line[1:6].strip()
lat = line[6:14] # Format is either ddmm.mmS/N or ddmm(.)mmmS/N
if lat[-1] == 'S':
NS = -1
else:
NS = 1
if lat[4] == '.':
lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
else:
lat = (int(lat[0:2]) + float(lat[2:4] + '.' + lat[4:-1]) /
60) * NS
lon = line[14:23]
if lon[-1] == 'W':
EW = -1
else:
EW = 1
if lon[5] == '.':
lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
else:
lon = (int(lon[0:3]) + float(lon[3:5] + '.' + lon[5:-1]) /
60) * EW
elev = float(line[23:-1].strip())
# Note, negative altitude can be indicated in 1st column
if line[0] == '-':
elev *= -1
stalist.append((station, lat, lon, elev))
f.close()
f = open('station.dat', 'w')
for sta in stalist:
line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
_cc_round(sta[2], 4).ljust(10),
_cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
f.write(line)
f.close()
return stalist | Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)] | Below is the the instruction that describes the task:
### Input:
Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
### Response:
def readSTATION0(path, stations):
"""
Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
"""
stalist = []
f = open(path + '/STATION0.HYP', 'r')
for line in f:
if line[1:6].strip() in stations:
station = line[1:6].strip()
lat = line[6:14] # Format is either ddmm.mmS/N or ddmm(.)mmmS/N
if lat[-1] == 'S':
NS = -1
else:
NS = 1
if lat[4] == '.':
lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
else:
lat = (int(lat[0:2]) + float(lat[2:4] + '.' + lat[4:-1]) /
60) * NS
lon = line[14:23]
if lon[-1] == 'W':
EW = -1
else:
EW = 1
if lon[5] == '.':
lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
else:
lon = (int(lon[0:3]) + float(lon[3:5] + '.' + lon[5:-1]) /
60) * EW
elev = float(line[23:-1].strip())
# Note, negative altitude can be indicated in 1st column
if line[0] == '-':
elev *= -1
stalist.append((station, lat, lon, elev))
f.close()
f = open('station.dat', 'w')
for sta in stalist:
line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
_cc_round(sta[2], 4).ljust(10),
_cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
f.write(line)
f.close()
return stalist |
def fetch(task_id, wait=0, cached=Conf.CACHED):
"""
Return the processed task.
:param task_id: the task name or uuid
:type task_id: str or uuid
:param wait: the number of milliseconds to wait for a result
:type wait: int
:param bool cached: run this against the cache backend
:return: the full task object
:rtype: Task
"""
if cached:
return fetch_cached(task_id, wait)
start = time()
while True:
t = Task.get_task(task_id)
if t:
return t
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01) | Return the processed task.
:param task_id: the task name or uuid
:type task_id: str or uuid
:param wait: the number of milliseconds to wait for a result
:type wait: int
:param bool cached: run this against the cache backend
:return: the full task object
:rtype: Task | Below is the the instruction that describes the task:
### Input:
Return the processed task.
:param task_id: the task name or uuid
:type task_id: str or uuid
:param wait: the number of milliseconds to wait for a result
:type wait: int
:param bool cached: run this against the cache backend
:return: the full task object
:rtype: Task
### Response:
def fetch(task_id, wait=0, cached=Conf.CACHED):
"""
Return the processed task.
:param task_id: the task name or uuid
:type task_id: str or uuid
:param wait: the number of milliseconds to wait for a result
:type wait: int
:param bool cached: run this against the cache backend
:return: the full task object
:rtype: Task
"""
if cached:
return fetch_cached(task_id, wait)
start = time()
while True:
t = Task.get_task(task_id)
if t:
return t
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01) |
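A hedged polling sketch; the task id is illustrative and the `success`/`result` attributes are assumptions about the Task model:
task = fetch('b509c9a1-0000-0000-0000-task-id-here', wait=1000)  # wait up to one second
if task is not None:
    print(task.success, task.result)   # assumed Task fields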
def _getsatname(self):
"""
Get the satellite name used in the rsr-reader, from the platform
and number
"""
if self.platform_name.startswith("Meteosat"):
return self.platform_name
else:
raise NotImplementedError(
'Platform {0} not yet supported...'.format(self.platform_name)) | Get the satellite name used in the rsr-reader, from the platform
and number | Below is the the instruction that describes the task:
### Input:
Get the satellite name used in the rsr-reader, from the platform
and number
### Response:
def _getsatname(self):
"""
Get the satellite name used in the rsr-reader, from the platform
and number
"""
if self.platform_name.startswith("Meteosat"):
return self.platform_name
else:
raise NotImplementedError(
'Platform {0} not yet supported...'.format(self.platform_name)) |
def bh_effective_spin(chi,incl):
"""
Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl))
with the correct sign.
Parameters
-----------
chi: float
the BH dimensionless spin parameter
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
Returns
----------
chi_eff: float
the effective dimensionless spin parameter solution
"""
if incl == 0:
chi_eff = chi
else:
# ISSO radius for the given BH spin magnitude and inclination
rISSO = PG_ISSO_solver(chi,incl)
        # Angle at which the branch of positive solutions has its minimum
incl_flip = scipy.optimize.fmin(pos_branch, math.pi/4, args=tuple([chi]), full_output=False, disp=False)[-1]
# Use incl_flip to determine the initial guess: the sign difference
# in the initial_guess ensures that chi_eff has the correct sign
if incl>incl_flip:
initial_guess = -1.1
else:
initial_guess = 1.0
chi_eff = scipy.optimize.fsolve(ISCO_eq_chi_first, initial_guess, args=(rISSO))
return chi_eff | Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl))
with the correct sign.
Parameters
-----------
chi: float
the BH dimensionless spin parameter
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
Returns
----------
chi_eff: float
the effective dimensionless spin parameter solution | Below is the the instruction that describes the task:
### Input:
Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl))
with the correct sign.
Parameters
-----------
chi: float
the BH dimensionless spin parameter
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
Returns
----------
chi_eff: float
the effective dimensionless spin parameter solution
### Response:
def bh_effective_spin(chi,incl):
"""
Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl))
with the correct sign.
Parameters
-----------
chi: float
the BH dimensionless spin parameter
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
Returns
----------
chi_eff: float
the effective dimensionless spin parameter solution
"""
if incl == 0:
chi_eff = chi
else:
# ISSO radius for the given BH spin magnitude and inclination
rISSO = PG_ISSO_solver(chi,incl)
        # Angle at which the branch of positive solutions has its minimum
incl_flip = scipy.optimize.fmin(pos_branch, math.pi/4, args=tuple([chi]), full_output=False, disp=False)[-1]
# Use incl_flip to determine the initial guess: the sign difference
# in the initial_guess ensures that chi_eff has the correct sign
if incl>incl_flip:
initial_guess = -1.1
else:
initial_guess = 1.0
chi_eff = scipy.optimize.fsolve(ISCO_eq_chi_first, initial_guess, args=(rISSO))
return chi_eff |
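A hedged call sketch; note that for a tilted spin the result comes back from scipy.optimize.fsolve as a length-1 array:
import math
chi_eff = bh_effective_spin(0.9, math.pi / 6.0)   # chi = 0.9 tilted by 30 degrees
print(chi_eff[0])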
def ast_to_sympy(expr):
'''Converts an AST expression to a sympy expression (STUPID)'''
from dolang import to_source
s = to_source(expr)
not_to_be_treated_as_functions = ['alpha','beta', 'gamma','zeta', 'Chi']
d = {v: sympy.Symbol(v) for v in not_to_be_treated_as_functions}
return sympy.sympify(s, locals=d) | Converts an AST expression to a sympy expression (STUPID) | Below is the the instruction that describes the task:
### Input:
Converts an AST expression to a sympy expression (STUPID)
### Response:
def ast_to_sympy(expr):
'''Converts an AST expression to a sympy expression (STUPID)'''
from dolang import to_source
s = to_source(expr)
not_to_be_treated_as_functions = ['alpha','beta', 'gamma','zeta', 'Chi']
d = {v: sympy.Symbol(v) for v in not_to_be_treated_as_functions}
return sympy.sympify(s, locals=d) |
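A hedged usage sketch; the input is assumed to be a Python AST expression node that dolang.to_source can render back to source text:
import ast
node = ast.parse('alpha + beta*x', mode='eval').body
print(ast_to_sympy(node))   # alpha and beta stay plain Symbols rather than sympy functions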
def block_worker(self, worker_id, reason):
"""
Block a worker from working on my tasks.
"""
params = {'WorkerId': worker_id, 'Reason': reason}
return self._process_request('BlockWorker', params) | Block a worker from working on my tasks. | Below is the the instruction that describes the task:
### Input:
Block a worker from working on my tasks.
### Response:
def block_worker(self, worker_id, reason):
"""
Block a worker from working on my tasks.
"""
params = {'WorkerId': worker_id, 'Reason': reason}
return self._process_request('BlockWorker', params) |
def _build(self, inputs):
"""Connects the Linear module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the Tensor provided here must have the same final dimension, in order for
the existing variables to be the correct size for the multiplication. The
batch size may differ for each connection.
Args:
inputs: A 2D Tensor of size [batch_size, input_size].
Returns:
A 2D Tensor of size [batch_size, output_size].
Raises:
base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with
the size of the second dimension specified.
base.IncompatibleShapeError: If reconnecting an already connected module
into the graph, and the shape of the input is not compatible with
previous inputs.
"""
input_shape = tuple(inputs.get_shape().as_list())
if len(input_shape) != 2:
raise base.IncompatibleShapeError(
"{}: rank of shape must be 2 not: {}".format(
self.scope_name, len(input_shape)))
if input_shape[1] is None:
raise base.IncompatibleShapeError(
"{}: Input size must be specified at module build time".format(
self.scope_name))
if self._input_shape is not None and input_shape[1] != self._input_shape[1]:
raise base.IncompatibleShapeError(
"{}: Input shape must be [batch_size, {}] not: [batch_size, {}]"
.format(self.scope_name, self._input_shape[1], input_shape[1]))
self._input_shape = input_shape
dtype = inputs.dtype
if "w" not in self._initializers:
self._initializers["w"] = create_linear_initializer(self._input_shape[1],
dtype)
if "b" not in self._initializers and self._use_bias:
self._initializers["b"] = create_bias_initializer(self._input_shape[1],
dtype)
weight_shape = (self._input_shape[1], self.output_size)
self._w = tf.get_variable("w",
shape=weight_shape,
dtype=dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
outputs = tf.matmul(inputs, self._w)
if self._use_bias:
bias_shape = (self.output_size,)
self._b = tf.get_variable("b",
shape=bias_shape,
dtype=dtype,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
outputs += self._b
return outputs | Connects the Linear module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the Tensor provided here must have the same final dimension, in order for
the existing variables to be the correct size for the multiplication. The
batch size may differ for each connection.
Args:
inputs: A 2D Tensor of size [batch_size, input_size].
Returns:
A 2D Tensor of size [batch_size, output_size].
Raises:
base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with
the size of the second dimension specified.
base.IncompatibleShapeError: If reconnecting an already connected module
into the graph, and the shape of the input is not compatible with
previous inputs. | Below is the the instruction that describes the task:
### Input:
Connects the Linear module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the Tensor provided here must have the same final dimension, in order for
the existing variables to be the correct size for the multiplication. The
batch size may differ for each connection.
Args:
inputs: A 2D Tensor of size [batch_size, input_size].
Returns:
A 2D Tensor of size [batch_size, output_size].
Raises:
base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with
the size of the second dimension specified.
base.IncompatibleShapeError: If reconnecting an already connected module
into the graph, and the shape of the input is not compatible with
previous inputs.
### Response:
def _build(self, inputs):
"""Connects the Linear module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the Tensor provided here must have the same final dimension, in order for
the existing variables to be the correct size for the multiplication. The
batch size may differ for each connection.
Args:
inputs: A 2D Tensor of size [batch_size, input_size].
Returns:
A 2D Tensor of size [batch_size, output_size].
Raises:
base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with
the size of the second dimension specified.
base.IncompatibleShapeError: If reconnecting an already connected module
into the graph, and the shape of the input is not compatible with
previous inputs.
"""
input_shape = tuple(inputs.get_shape().as_list())
if len(input_shape) != 2:
raise base.IncompatibleShapeError(
"{}: rank of shape must be 2 not: {}".format(
self.scope_name, len(input_shape)))
if input_shape[1] is None:
raise base.IncompatibleShapeError(
"{}: Input size must be specified at module build time".format(
self.scope_name))
if self._input_shape is not None and input_shape[1] != self._input_shape[1]:
raise base.IncompatibleShapeError(
"{}: Input shape must be [batch_size, {}] not: [batch_size, {}]"
.format(self.scope_name, self._input_shape[1], input_shape[1]))
self._input_shape = input_shape
dtype = inputs.dtype
if "w" not in self._initializers:
self._initializers["w"] = create_linear_initializer(self._input_shape[1],
dtype)
if "b" not in self._initializers and self._use_bias:
self._initializers["b"] = create_bias_initializer(self._input_shape[1],
dtype)
weight_shape = (self._input_shape[1], self.output_size)
self._w = tf.get_variable("w",
shape=weight_shape,
dtype=dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
outputs = tf.matmul(inputs, self._w)
if self._use_bias:
bias_shape = (self.output_size,)
self._b = tf.get_variable("b",
shape=bias_shape,
dtype=dtype,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
outputs += self._b
return outputs |
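A hedged graph-mode connection sketch in the Sonnet/TF1 style implied above; the constructor argument and placeholder shape are assumptions:
inputs = tf.placeholder(tf.float32, shape=[None, 64])
lin = Linear(output_size=10)   # assumed constructor signature
out_a = lin(inputs)            # __call__ dispatches to _build
out_b = lin(inputs)            # a second connection reuses w/b but must see input size 64 again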
def register_error_code(code, exception_type, domain='core'):
"""Register a new error code"""
Logger._error_code_to_exception[code] = (exception_type, domain)
Logger._domain_codes[domain].add(code) | Register a new error code | Below is the the instruction that describes the task:
### Input:
Register a new error code
### Response:
def register_error_code(code, exception_type, domain='core'):
"""Register a new error code"""
Logger._error_code_to_exception[code] = (exception_type, domain)
Logger._domain_codes[domain].add(code) |
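A hedged registration sketch; the numeric code and the domain name are made up for illustration:
class PluginLoadError(Exception):
    pass

register_error_code(4201, PluginLoadError, domain='plugins')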
def get(self, didMethodName, required=True) -> DidMethod:
"""
:param didMethodName: name of DID Method
:param required: if not found and True, throws an exception, else None
:return: DID Method
"""
dm = self.d.get(didMethodName) if didMethodName else self.default
if not dm and required:
raise DidMethodNotFound
return dm | :param didMethodName: name of DID Method
:param required: if not found and True, throws an exception, else None
:return: DID Method | Below is the the instruction that describes the task:
### Input:
:param didMethodName: name of DID Method
:param required: if not found and True, throws an exception, else None
:return: DID Method
### Response:
def get(self, didMethodName, required=True) -> DidMethod:
"""
:param didMethodName: name of DID Method
:param required: if not found and True, throws an exception, else None
:return: DID Method
"""
dm = self.d.get(didMethodName) if didMethodName else self.default
if not dm and required:
raise DidMethodNotFound
return dm |
def get_reviews(self, user_id):
""" Get reviews for a particular user
"""
url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
user_id=user_id,
at=self.access_token)
return _get_request(url) | Get reviews for a particular user | Below is the the instruction that describes the task:
### Input:
Get reviews for a particular user
### Response:
def get_reviews(self, user_id):
""" Get reviews for a particular user
"""
url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
user_id=user_id,
at=self.access_token)
return _get_request(url) |
def center(self, X):
"""
Center `X` in PCA space.
"""
X = X.copy()
inan = numpy.isnan(X)
if self.mu is None:
X_ = numpy.ma.masked_array(X, inan)
self.mu = X_.mean(0).base
self.sigma = X_.std(0).base
reduce(lambda y,x: setitem(x[0], x[1], x[2]), zip(X.T, inan.T, self.mu), None)
X = X - self.mu
X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma)
return X | Center `X` in PCA space. | Below is the the instruction that describes the task:
### Input:
Center `X` in PCA space.
### Response:
def center(self, X):
"""
Center `X` in PCA space.
"""
X = X.copy()
inan = numpy.isnan(X)
if self.mu is None:
X_ = numpy.ma.masked_array(X, inan)
self.mu = X_.mean(0).base
self.sigma = X_.std(0).base
reduce(lambda y,x: setitem(x[0], x[1], x[2]), zip(X.T, inan.T, self.mu), None)
X = X - self.mu
X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma)
return X |
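A hedged usage sketch; `pca` stands for an instance of the surrounding class with `mu` and `sigma` still unset:
import numpy
X = numpy.array([[1.0, numpy.nan], [3.0, 4.0]])
Xc = pca.center(X)   # first call fits mu/sigma on the masked data, then standardizes
print(Xc.shape)      # same shape as X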
def fetch(self):
"""
Fetch a CallSummaryInstance
:returns: Fetched CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CallSummaryInstance(self._version, payload, call_sid=self._solution['call_sid'], ) | Fetch a CallSummaryInstance
:returns: Fetched CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryInstance | Below is the the instruction that describes the task:
### Input:
Fetch a CallSummaryInstance
:returns: Fetched CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryInstance
### Response:
def fetch(self):
"""
Fetch a CallSummaryInstance
:returns: Fetched CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CallSummaryInstance(self._version, payload, call_sid=self._solution['call_sid'], ) |
def gcmt_to_simple_array(self, centroid_location=True):
"""
Converts the GCMT catalogue to a simple array of
[ID, year, month, day, hour, minute, second, long., lat., depth, Mw,
strike1, dip1, rake1, strike2, dip2, rake2, b-plunge, b-azimuth,
b-eigenvalue, p-plunge, p-azimuth, p-eigenvalue, t-plunge, t-azimuth,
t-eigenvalue, moment, f_clvd, erel]
"""
catalogue = np.zeros([self.get_number_tensors(), 29], dtype=float)
for iloc, tensor in enumerate(self.gcmts):
catalogue[iloc, 0] = iloc
if centroid_location:
catalogue[iloc, 1] = float(tensor.centroid.date.year)
catalogue[iloc, 2] = float(tensor.centroid.date.month)
catalogue[iloc, 3] = float(tensor.centroid.date.day)
catalogue[iloc, 4] = float(tensor.centroid.time.hour)
catalogue[iloc, 5] = float(tensor.centroid.time.minute)
catalogue[iloc, 6] = np.round(
np.float(tensor.centroid.time.second) +
np.float(tensor.centroid.time.microsecond) / 1000000., 2)
catalogue[iloc, 7] = tensor.centroid.longitude
catalogue[iloc, 8] = tensor.centroid.latitude
catalogue[iloc, 9] = tensor.centroid.depth
else:
catalogue[iloc, 1] = float(tensor.hypocentre.date.year)
catalogue[iloc, 2] = float(tensor.hypocentre.date.month)
catalogue[iloc, 3] = float(tensor.hypocentre.date.day)
catalogue[iloc, 4] = float(tensor.hypocentre.time.hour)
catalogue[iloc, 5] = float(tensor.hypocentre.time.minute)
catalogue[iloc, 6] = np.round(
np.float(tensor.centroid.time.second) +
np.float(tensor.centroid.time.microsecond) / 1000000., 2)
catalogue[iloc, 7] = tensor.hypocentre.longitude
catalogue[iloc, 8] = tensor.hypocentre.latitude
catalogue[iloc, 9] = tensor.hypocentre.depth
catalogue[iloc, 10] = tensor.magnitude
catalogue[iloc, 11] = tensor.moment
catalogue[iloc, 12] = tensor.f_clvd
catalogue[iloc, 13] = tensor.e_rel
# Nodal planes
catalogue[iloc, 14] = tensor.nodal_planes.nodal_plane_1['strike']
catalogue[iloc, 15] = tensor.nodal_planes.nodal_plane_1['dip']
catalogue[iloc, 16] = tensor.nodal_planes.nodal_plane_1['rake']
catalogue[iloc, 17] = tensor.nodal_planes.nodal_plane_2['strike']
catalogue[iloc, 18] = tensor.nodal_planes.nodal_plane_2['dip']
catalogue[iloc, 19] = tensor.nodal_planes.nodal_plane_2['rake']
# Principal axes
catalogue[iloc, 20] = tensor.principal_axes.b_axis['eigenvalue']
catalogue[iloc, 21] = tensor.principal_axes.b_axis['azimuth']
catalogue[iloc, 22] = tensor.principal_axes.b_axis['plunge']
catalogue[iloc, 23] = tensor.principal_axes.p_axis['eigenvalue']
catalogue[iloc, 24] = tensor.principal_axes.p_axis['azimuth']
catalogue[iloc, 25] = tensor.principal_axes.p_axis['plunge']
catalogue[iloc, 26] = tensor.principal_axes.t_axis['eigenvalue']
catalogue[iloc, 27] = tensor.principal_axes.t_axis['azimuth']
catalogue[iloc, 28] = tensor.principal_axes.t_axis['plunge']
return catalogue | Converts the GCMT catalogue to a simple array of
[ID, year, month, day, hour, minute, second, long., lat., depth, Mw,
strike1, dip1, rake1, strike2, dip2, rake2, b-plunge, b-azimuth,
b-eigenvalue, p-plunge, p-azimuth, p-eigenvalue, t-plunge, t-azimuth,
t-eigenvalue, moment, f_clvd, erel] | Below is the the instruction that describes the task:
### Input:
Converts the GCMT catalogue to a simple array of
[ID, year, month, day, hour, minute, second, long., lat., depth, Mw,
strike1, dip1, rake1, strike2, dip2, rake2, b-plunge, b-azimuth,
b-eigenvalue, p-plunge, p-azimuth, p-eigenvalue, t-plunge, t-azimuth,
t-eigenvalue, moment, f_clvd, erel]
### Response:
def gcmt_to_simple_array(self, centroid_location=True):
"""
Converts the GCMT catalogue to a simple array of
[ID, year, month, day, hour, minute, second, long., lat., depth, Mw,
strike1, dip1, rake1, strike2, dip2, rake2, b-plunge, b-azimuth,
b-eigenvalue, p-plunge, p-azimuth, p-eigenvalue, t-plunge, t-azimuth,
t-eigenvalue, moment, f_clvd, erel]
"""
catalogue = np.zeros([self.get_number_tensors(), 29], dtype=float)
for iloc, tensor in enumerate(self.gcmts):
catalogue[iloc, 0] = iloc
if centroid_location:
catalogue[iloc, 1] = float(tensor.centroid.date.year)
catalogue[iloc, 2] = float(tensor.centroid.date.month)
catalogue[iloc, 3] = float(tensor.centroid.date.day)
catalogue[iloc, 4] = float(tensor.centroid.time.hour)
catalogue[iloc, 5] = float(tensor.centroid.time.minute)
catalogue[iloc, 6] = np.round(
np.float(tensor.centroid.time.second) +
np.float(tensor.centroid.time.microsecond) / 1000000., 2)
catalogue[iloc, 7] = tensor.centroid.longitude
catalogue[iloc, 8] = tensor.centroid.latitude
catalogue[iloc, 9] = tensor.centroid.depth
else:
catalogue[iloc, 1] = float(tensor.hypocentre.date.year)
catalogue[iloc, 2] = float(tensor.hypocentre.date.month)
catalogue[iloc, 3] = float(tensor.hypocentre.date.day)
catalogue[iloc, 4] = float(tensor.hypocentre.time.hour)
catalogue[iloc, 5] = float(tensor.hypocentre.time.minute)
catalogue[iloc, 6] = np.round(
np.float(tensor.centroid.time.second) +
np.float(tensor.centroid.time.microsecond) / 1000000., 2)
catalogue[iloc, 7] = tensor.hypocentre.longitude
catalogue[iloc, 8] = tensor.hypocentre.latitude
catalogue[iloc, 9] = tensor.hypocentre.depth
catalogue[iloc, 10] = tensor.magnitude
catalogue[iloc, 11] = tensor.moment
catalogue[iloc, 12] = tensor.f_clvd
catalogue[iloc, 13] = tensor.e_rel
# Nodal planes
catalogue[iloc, 14] = tensor.nodal_planes.nodal_plane_1['strike']
catalogue[iloc, 15] = tensor.nodal_planes.nodal_plane_1['dip']
catalogue[iloc, 16] = tensor.nodal_planes.nodal_plane_1['rake']
catalogue[iloc, 17] = tensor.nodal_planes.nodal_plane_2['strike']
catalogue[iloc, 18] = tensor.nodal_planes.nodal_plane_2['dip']
catalogue[iloc, 19] = tensor.nodal_planes.nodal_plane_2['rake']
# Principal axes
catalogue[iloc, 20] = tensor.principal_axes.b_axis['eigenvalue']
catalogue[iloc, 21] = tensor.principal_axes.b_axis['azimuth']
catalogue[iloc, 22] = tensor.principal_axes.b_axis['plunge']
catalogue[iloc, 23] = tensor.principal_axes.p_axis['eigenvalue']
catalogue[iloc, 24] = tensor.principal_axes.p_axis['azimuth']
catalogue[iloc, 25] = tensor.principal_axes.p_axis['plunge']
catalogue[iloc, 26] = tensor.principal_axes.t_axis['eigenvalue']
catalogue[iloc, 27] = tensor.principal_axes.t_axis['azimuth']
catalogue[iloc, 28] = tensor.principal_axes.t_axis['plunge']
return catalogue |
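A hedged usage sketch; `cat` is a populated catalogue object of the class this method belongs to:
arr = cat.gcmt_to_simple_array(centroid_location=True)
print(arr.shape)         # (number of tensors, 29)
print(arr[:, 10].max())  # column 10 holds the moment magnitude Mw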
def data_check(self, participant):
"""Check a participants data."""
participant_id = participant.uniqueid
nodes = Node.query.filter_by(participant_id=participant_id).all()
if len(nodes) != self.experiment_repeats + self.practice_repeats:
print("Error: Participant has {} nodes. Data check failed"
.format(len(nodes)))
return False
nets = [n.network_id for n in nodes]
if len(nets) != len(set(nets)):
print "Error: Participant participated in the same network \
multiple times. Data check failed"
return False
if None in [n.fitness for n in nodes]:
print "Error: some of participants nodes are missing a fitness. \
Data check failed."
return False
if None in [n.score for n in nodes]:
print "Error: some of participants nodes are missing a score. \
Data check failed"
return False
return True | Check a participants data. | Below is the the instruction that describes the task:
### Input:
Check a participants data.
### Response:
def data_check(self, participant):
"""Check a participants data."""
participant_id = participant.uniqueid
nodes = Node.query.filter_by(participant_id=participant_id).all()
if len(nodes) != self.experiment_repeats + self.practice_repeats:
print("Error: Participant has {} nodes. Data check failed"
.format(len(nodes)))
return False
nets = [n.network_id for n in nodes]
if len(nets) != len(set(nets)):
print "Error: Participant participated in the same network \
multiple times. Data check failed"
return False
if None in [n.fitness for n in nodes]:
print "Error: some of participants nodes are missing a fitness. \
Data check failed."
return False
if None in [n.score for n in nodes]:
print "Error: some of participants nodes are missing a score. \
Data check failed"
return False
return True |
def debug_derivative(self, guess):
"""returns (explicit, auto)"""
from .lmmin import check_derivative
return check_derivative(self.component.npar, self.data.size,
self.lm_model, self.lm_deriv, guess) | returns (explicit, auto) | Below is the the instruction that describes the task:
### Input:
returns (explicit, auto)
### Response:
def debug_derivative(self, guess):
"""returns (explicit, auto)"""
from .lmmin import check_derivative
return check_derivative(self.component.npar, self.data.size,
self.lm_model, self.lm_deriv, guess) |
def open(cls, sock, chunk_type, isatty, chunk_eof_type=None, buf_size=None, select_timeout=None):
"""Yields the write side of a pipe that will copy appropriately chunked values to a socket."""
with cls.open_multi(sock,
(chunk_type,),
(isatty,),
chunk_eof_type,
buf_size,
select_timeout) as ctx:
yield ctx | Yields the write side of a pipe that will copy appropriately chunked values to a socket. | Below is the the instruction that describes the task:
### Input:
Yields the write side of a pipe that will copy appropriately chunked values to a socket.
### Response:
def open(cls, sock, chunk_type, isatty, chunk_eof_type=None, buf_size=None, select_timeout=None):
"""Yields the write side of a pipe that will copy appropriately chunked values to a socket."""
with cls.open_multi(sock,
(chunk_type,),
(isatty,),
chunk_eof_type,
buf_size,
select_timeout) as ctx:
yield ctx |
def installer(cv, ctx, site, force=False):
"""
Installer factory
@param cv: Current version (The version of IPS we are installing)
@type cv: ips_vagrant.common.version.Version
@type ctx: ips_vagrant.cli.Context
@param site: The IPS Site we are installing
@type site: ips_vagrant.models.sites.Site
@param force: Overwrite existing files / databases
@type force: bool
@return: Installer instance
@rtype: ips_vagrant.installer.latest.Installer
"""
log = logging.getLogger('ipsv.installer')
log.info('Loading installer for IPS %s', cv)
iv = None
for v in versions:
vstring = '.'.join(map(str, v)) if v else 'latest'
# cvstring = '.'.join(map(str, cv)) if cv else 'latest'
log.debug('Checking if version %s >= %s', vstring, cv.vstring)
if (v is None) or (v >= cv.vtuple):
log.debug('Changing installer version to %s', vstring)
iv = v
log.info('Returning installer version %s', '.'.join(map(str, iv)) if iv else 'latest')
return versions[iv](ctx, site, force) | Installer factory
@param cv: Current version (The version of IPS we are installing)
@type cv: ips_vagrant.common.version.Version
@type ctx: ips_vagrant.cli.Context
@param site: The IPS Site we are installing
@type site: ips_vagrant.models.sites.Site
@param force: Overwrite existing files / databases
@type force: bool
@return: Installer instance
@rtype: ips_vagrant.installer.latest.Installer | Below is the the instruction that describes the task:
### Input:
Installer factory
@param cv: Current version (The version of IPS we are installing)
@type cv: ips_vagrant.common.version.Version
@type ctx: ips_vagrant.cli.Context
@param site: The IPS Site we are installing
@type site: ips_vagrant.models.sites.Site
@param force: Overwrite existing files / databases
@type force: bool
@return: Installer instance
@rtype: ips_vagrant.installer.latest.Installer
### Response:
def installer(cv, ctx, site, force=False):
"""
Installer factory
@param cv: Current version (The version of IPS we are installing)
@type cv: ips_vagrant.common.version.Version
@type ctx: ips_vagrant.cli.Context
@param site: The IPS Site we are installing
@type site: ips_vagrant.models.sites.Site
@param force: Overwrite existing files / databases
@type force: bool
@return: Installer instance
@rtype: ips_vagrant.installer.latest.Installer
"""
log = logging.getLogger('ipsv.installer')
log.info('Loading installer for IPS %s', cv)
iv = None
for v in versions:
vstring = '.'.join(map(str, v)) if v else 'latest'
# cvstring = '.'.join(map(str, cv)) if cv else 'latest'
log.debug('Checking if version %s >= %s', vstring, cv.vstring)
if (v is None) or (v >= cv.vtuple):
log.debug('Changing installer version to %s', vstring)
iv = v
log.info('Returning installer version %s', '.'.join(map(str, iv)) if iv else 'latest')
return versions[iv](ctx, site, force) |
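A hedged factory-call sketch; how Version, ctx and site are constructed is not shown here and is assumed from the docstring types:
inst = installer(Version('4.1.16'), ctx, site, force=False)   # Version/ctx/site are placeholders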
def view_portfolio_losses(token, dstore):
"""
The losses for the full portfolio, for each realization and loss type,
extracted from the event loss table.
"""
oq = dstore['oqparam']
loss_dt = oq.loss_dt()
data = portfolio_loss(dstore).view(loss_dt)[:, 0]
rlzids = [str(r) for r in range(len(data))]
array = util.compose_arrays(numpy.array(rlzids), data, 'rlz')
# this is very sensitive to rounding errors, so I am using a low precision
return rst_table(array, fmt='%.5E') | The losses for the full portfolio, for each realization and loss type,
extracted from the event loss table. | Below is the the instruction that describes the task:
### Input:
The losses for the full portfolio, for each realization and loss type,
extracted from the event loss table.
### Response:
def view_portfolio_losses(token, dstore):
"""
The losses for the full portfolio, for each realization and loss type,
extracted from the event loss table.
"""
oq = dstore['oqparam']
loss_dt = oq.loss_dt()
data = portfolio_loss(dstore).view(loss_dt)[:, 0]
rlzids = [str(r) for r in range(len(data))]
array = util.compose_arrays(numpy.array(rlzids), data, 'rlz')
# this is very sensitive to rounding errors, so I am using a low precision
return rst_table(array, fmt='%.5E') |
def output(self, stream, value):
"""SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
            Expression: Output assignment expression that is valid in the context of this operator.
"""
if stream not in self.outputs:
raise ValueError("Stream is not an output of this operator.")
e = self.expression(value)
e._stream = stream
return e | SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
            Expression: Output assignment expression that is valid in the context of this operator.
### Input:
SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
            Expression: Output assignment expression that is valid in the context of this operator.
### Response:
def output(self, stream, value):
"""SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
            Expression: Output assignment expression that is valid in the context of this operator.
"""
if stream not in self.outputs:
raise ValueError("Stream is not an output of this operator.")
e = self.expression(value)
e._stream = stream
return e |
def filter(self, **filters):
"""
Add a filter to this query.
Appends to any previous filters set.
:rtype: Query
"""
q = self._clone()
for key, value in filters.items():
filter_key = re.split('__', key)
filter_attr = filter_key[0]
if filter_attr not in self._valid_filter_attrs:
raise ClientValidationError("Invalid filter attribute: %s" % key)
# we use __ as a separator in the Python library, the APIs use '.'
q._filters['.'.join(filter_key)].append(value)
return q | Add a filter to this query.
Appends to any previous filters set.
:rtype: Query | Below is the the instruction that describes the task:
### Input:
Add a filter to this query.
Appends to any previous filters set.
:rtype: Query
### Response:
def filter(self, **filters):
"""
Add a filter to this query.
Appends to any previous filters set.
:rtype: Query
"""
q = self._clone()
for key, value in filters.items():
filter_key = re.split('__', key)
filter_attr = filter_key[0]
if filter_attr not in self._valid_filter_attrs:
raise ClientValidationError("Invalid filter attribute: %s" % key)
# we use __ as a separator in the Python library, the APIs use '.'
q._filters['.'.join(filter_key)].append(value)
return q |
def unique_flags(items, key=None):
"""
Returns a list of booleans corresponding to the first instance of each
unique item.
Args:
items (Sequence): indexable collection of items
key (Callable, optional): custom normalization function.
If specified returns items where `key(item)` is unique.
Returns:
List[bool] : flags the items that are unique
Example:
>>> import ubelt as ub
>>> items = [0, 2, 1, 1, 0, 9, 2]
>>> flags = unique_flags(items)
>>> assert flags == [True, True, True, False, False, True, False]
>>> flags = unique_flags(items, key=lambda x: x % 2 == 0)
>>> assert flags == [True, False, True, False, False, False, False]
"""
len_ = len(items)
if key is None:
item_to_index = dict(zip(reversed(items), reversed(range(len_))))
indices = item_to_index.values()
else:
indices = argunique(items, key=key)
flags = boolmask(indices, len_)
return flags | Returns a list of booleans corresponding to the first instance of each
unique item.
Args:
items (Sequence): indexable collection of items
key (Callable, optional): custom normalization function.
If specified returns items where `key(item)` is unique.
Returns:
List[bool] : flags the items that are unique
Example:
>>> import ubelt as ub
>>> items = [0, 2, 1, 1, 0, 9, 2]
>>> flags = unique_flags(items)
>>> assert flags == [True, True, True, False, False, True, False]
>>> flags = unique_flags(items, key=lambda x: x % 2 == 0)
>>> assert flags == [True, False, True, False, False, False, False] | Below is the the instruction that describes the task:
### Input:
Returns a list of booleans corresponding to the first instance of each
unique item.
Args:
items (Sequence): indexable collection of items
key (Callable, optional): custom normalization function.
If specified returns items where `key(item)` is unique.
Returns:
List[bool] : flags the items that are unique
Example:
>>> import ubelt as ub
>>> items = [0, 2, 1, 1, 0, 9, 2]
>>> flags = unique_flags(items)
>>> assert flags == [True, True, True, False, False, True, False]
>>> flags = unique_flags(items, key=lambda x: x % 2 == 0)
>>> assert flags == [True, False, True, False, False, False, False]
### Response:
def unique_flags(items, key=None):
"""
Returns a list of booleans corresponding to the first instance of each
unique item.
Args:
items (Sequence): indexable collection of items
key (Callable, optional): custom normalization function.
If specified returns items where `key(item)` is unique.
Returns:
List[bool] : flags the items that are unique
Example:
>>> import ubelt as ub
>>> items = [0, 2, 1, 1, 0, 9, 2]
>>> flags = unique_flags(items)
>>> assert flags == [True, True, True, False, False, True, False]
>>> flags = unique_flags(items, key=lambda x: x % 2 == 0)
>>> assert flags == [True, False, True, False, False, False, False]
"""
len_ = len(items)
if key is None:
item_to_index = dict(zip(reversed(items), reversed(range(len_))))
indices = item_to_index.values()
else:
indices = argunique(items, key=key)
flags = boolmask(indices, len_)
return flags |
def unregister(self, uuid):
"""Unregister a schema registered with input uuid.
:raises: KeyError if uuid is not already registered.
"""
schema = self._schbyuuid.pop(uuid)
# clean schemas by name
self._schbyname[schema.name].remove(schema)
if not self._schbyname[schema.name]:
del self._schbyname[schema.name] | Unregister a schema registered with input uuid.
:raises: KeyError if uuid is not already registered. | Below is the the instruction that describes the task:
### Input:
Unregister a schema registered with input uuid.
:raises: KeyError if uuid is not already registered.
### Response:
def unregister(self, uuid):
"""Unregister a schema registered with input uuid.
:raises: KeyError if uuid is not already registered.
"""
schema = self._schbyuuid.pop(uuid)
# clean schemas by name
self._schbyname[schema.name].remove(schema)
if not self._schbyname[schema.name]:
del self._schbyname[schema.name] |
def filter(self, userinfo, user_info_claims=None):
"""
Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims.
"""
if user_info_claims is None:
return copy.copy(userinfo)
else:
result = {}
missing = []
optional = []
for key, restr in user_info_claims.items():
try:
result[key] = userinfo[key]
except KeyError:
if restr == {"essential": True}:
missing.append(key)
else:
optional.append(key)
return result | Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims. | Below is the the instruction that describes the task:
### Input:
Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims.
### Response:
def filter(self, userinfo, user_info_claims=None):
"""
Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims.
"""
if user_info_claims is None:
return copy.copy(userinfo)
else:
result = {}
missing = []
optional = []
for key, restr in user_info_claims.items():
try:
result[key] = userinfo[key]
except KeyError:
if restr == {"essential": True}:
missing.append(key)
else:
optional.append(key)
return result |
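As a plain-function sketch of the same best-effort selection (the containing class is not shown above): requested claims that are present are copied through, and missing essential claims are merely noted rather than raised.
import copy
def filter_claims(userinfo, user_info_claims=None):
    if user_info_claims is None:
        return copy.copy(userinfo)
    result = {}
    missing, optional = [], []
    for key, restr in user_info_claims.items():
        if key in userinfo:
            result[key] = userinfo[key]
        elif restr == {"essential": True}:
            missing.append(key)                       # recorded, but no error is raised
        else:
            optional.append(key)
    return result
userinfo = {'sub': 'alice', 'email': '[email protected]', 'nickname': 'ali'}
claims = {'email': {'essential': True}, 'picture': None, 'phone_number': {'essential': True}}
assert filter_claims(userinfo, claims) == {'email': '[email protected]'}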
def load_state_from_disk():
""" loads the state from a local data.json file
"""
if is_there_state():
with open('data.json', 'r') as f:
data = json.load(f)
return data
else:
return False | loads the state from a local data.json file | Below is the the instruction that describes the task:
### Input:
loads the state from a local data.json file
### Response:
def load_state_from_disk():
""" loads the state from a local data.json file
"""
if is_there_state():
with open('data.json', 'r') as f:
data = json.load(f)
return data
else:
return False |
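is_there_state() is not shown above; a reasonable assumption is that it simply checks for a data.json file in the working directory, which the runnable sketch below uses to round-trip a small state dict.
import json
import os
def is_there_state():                                 # assumed helper, not from the source above
    return os.path.isfile('data.json')
def load_state_from_disk():
    if is_there_state():
        with open('data.json', 'r') as f:
            return json.load(f)
    return False
with open('data.json', 'w') as f:
    json.dump({'cluster': 'dev', 'nodes': 2}, f)
assert load_state_from_disk() == {'cluster': 'dev', 'nodes': 2}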
def remove(name=None, pkgs=None, **kwargs):
'''
Remove the passed package(s) from the system using winrepo
.. versionadded:: 0.16.0
Args:
name (str):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
pkgs (list):
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Kwargs:
version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.
saltenv (str): Salt environment. Default ``base``
refresh (bool): Refresh package metadata. Default ``False``
Returns:
dict: Returns a dict containing the changes.
If the package is removed by ``pkg.remove``:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If the package is already uninstalled:
{'<package>': {'current': 'not installed'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
saltenv = kwargs.get('saltenv', 'base')
refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
# no need to call _refresh_db_conditional as list_pkgs will do it
ret = {}
# Make sure name or pkgs is passed
if not name and not pkgs:
return 'Must pass a single package or a list of packages'
# Get package parameters
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
# Get a list of currently installed software for comparison at the end
old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)
# Loop through each package
changed = [] # list of changed package names
for pkgname, version_num in six.iteritems(pkg_params):
# Load package information for the package
pkginfo = _get_package_info(pkgname, saltenv=saltenv)
# Make sure pkginfo was found
if not pkginfo:
msg = 'Unable to locate package {0}'.format(pkgname)
log.error(msg)
ret[pkgname] = msg
continue
# Check to see if package is installed on the system
if pkgname not in old:
log.debug('%s %s not installed', pkgname, version_num if version_num else '')
ret[pkgname] = {'current': 'not installed'}
continue
removal_targets = []
# Only support a single version number
if version_num is not None:
# Using the salt cmdline with version=5.3 might be interpreted
# as a float it must be converted to a string in order for
# string matching to work.
version_num = six.text_type(version_num)
# At least one version of the software is installed.
if version_num is None:
for ver_install in old[pkgname]:
if ver_install not in pkginfo and 'latest' in pkginfo:
log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
removal_targets.append('latest')
else:
removal_targets.append(ver_install)
else:
if version_num in pkginfo:
# we known how to remove this version
if version_num in old[pkgname]:
removal_targets.append(version_num)
else:
log.debug('%s %s not installed', pkgname, version_num)
ret[pkgname] = {'current': '{0} not installed'.format(version_num)}
continue
elif 'latest' in pkginfo:
# we do not have version entry, assume software can self upgrade and use latest
log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
removal_targets.append('latest')
if not removal_targets:
log.error('%s %s no definition to remove this version', pkgname, version_num)
ret[pkgname] = {
'current': '{0} no definition, cannot removed'.format(version_num)
}
continue
for target in removal_targets:
# Get the uninstaller
uninstaller = pkginfo[target].get('uninstaller', '')
cache_dir = pkginfo[target].get('cache_dir', False)
uninstall_flags = pkginfo[target].get('uninstall_flags', '')
# If no uninstaller found, use the installer with uninstall flags
if not uninstaller and uninstall_flags:
uninstaller = pkginfo[target].get('installer', '')
# If still no uninstaller found, fail
if not uninstaller:
log.error(
'No installer or uninstaller configured for package %s',
pkgname,
)
ret[pkgname] = {'no uninstaller defined': target}
continue
# Where is the uninstaller
if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):
# Check for the 'cache_dir' parameter in the .sls file
# If true, the entire directory will be cached instead of the
# individual file. This is useful for installations that are not
# single files
if cache_dir and uninstaller.startswith('salt:'):
path, _ = os.path.split(uninstaller)
__salt__['cp.cache_dir'](path,
saltenv,
False,
None,
                                             'E@init.sls$')
# Check to see if the uninstaller is cached
cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv)
if not cached_pkg:
# It's not cached. Cache it, mate.
cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv)
# Check if the uninstaller was cached successfully
if not cached_pkg:
log.error('Unable to cache %s', uninstaller)
ret[pkgname] = {'unable to cache': uninstaller}
continue
# Compare the hash of the cached installer to the source only if
# the file is hosted on salt:
# TODO cp.cache_file does cache and hash checking? So why do it again?
if uninstaller.startswith('salt:'):
if __salt__['cp.hash_file'](uninstaller, saltenv) != \
__salt__['cp.hash_file'](cached_pkg):
try:
cached_pkg = __salt__['cp.cache_file'](
uninstaller, saltenv)
except MinionError as exc:
return '{0}: {1}'.format(exc, uninstaller)
# Check if the installer was cached successfully
if not cached_pkg:
log.error('Unable to cache %s', uninstaller)
ret[pkgname] = {'unable to cache': uninstaller}
continue
else:
# Run the uninstaller directly
# (not hosted on salt:, https:, etc.)
cached_pkg = os.path.expandvars(uninstaller)
# Fix non-windows slashes
cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg)
# os.path.expandvars is not required as we run everything through cmd.exe /s /c
if kwargs.get('extra_uninstall_flags'):
uninstall_flags = '{0} {1}'.format(
uninstall_flags, kwargs.get('extra_uninstall_flags', ''))
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
# Check if uninstaller is set to {guid}, if not we assume its a remote msi file.
# which has already been downloaded.
arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
else:
arguments = '"{0}"'.format(cached_pkg)
if uninstall_flags:
arguments = '{0} {1}'.format(arguments, uninstall_flags)
# Uninstall the software
changed.append(pkgname)
# Check Use Scheduler Option
if pkginfo[target].get('use_scheduler', False):
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd_shell,
arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00',
ac_only=False,
stop_if_on_batteries=False)
# Run Scheduled Task
if not __salt__['task.run_wait'](name='update-salt-software'):
log.error('Failed to remove %s', pkgname)
log.error('Scheduled Task failed to run')
ret[pkgname] = {'uninstall status': 'failed'}
else:
# Launch the command
result = __salt__['cmd.run_all'](
'"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
ret[pkgname] = {'uninstall status': 'success'}
changed.append(pkgname)
elif result['retcode'] == 3010:
# 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
report_reboot_exit_codes = kwargs.pop(
'report_reboot_exit_codes', True)
if report_reboot_exit_codes:
__salt__['system.set_reboot_required_witnessed']()
ret[pkgname] = {'uninstall status': 'success, reboot required'}
changed.append(pkgname)
elif result['retcode'] == 1641:
# 1641 is ERROR_SUCCESS_REBOOT_INITIATED
ret[pkgname] = {'uninstall status': 'success, reboot initiated'}
changed.append(pkgname)
else:
log.error('Failed to remove %s', pkgname)
log.error('retcode %s', result['retcode'])
log.error('uninstaller output: %s', result['stdout'])
ret[pkgname] = {'uninstall status': 'failed'}
# Get a new list of installed software
new = list_pkgs(saltenv=saltenv, refresh=False)
# Take the "old" package list and convert the values to strings in
# preparation for the comparison below.
__salt__['pkg_resource.stringify'](old)
# Check for changes in the registry
difference = salt.utils.data.compare_dicts(old, new)
found_chgs = all(name in difference for name in changed)
end_t = time.time() + 3 # give it 3 seconds to catch up.
while not found_chgs and time.time() < end_t:
time.sleep(0.5)
new = list_pkgs(saltenv=saltenv, refresh=False)
difference = salt.utils.data.compare_dicts(old, new)
found_chgs = all(name in difference for name in changed)
if not found_chgs:
log.warning('Expected changes for package removal may not have occured')
# Compare the software list before and after
# Add the difference to ret
ret.update(difference)
return ret | Remove the passed package(s) from the system using winrepo
.. versionadded:: 0.16.0
Args:
name (str):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
pkgs (list):
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Kwargs:
version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.
saltenv (str): Salt environment. Default ``base``
refresh (bool): Refresh package metadata. Default ``False``
Returns:
dict: Returns a dict containing the changes.
If the package is removed by ``pkg.remove``:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If the package is already uninstalled:
{'<package>': {'current': 'not installed'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]' | Below is the the instruction that describes the task:
### Input:
Remove the passed package(s) from the system using winrepo
.. versionadded:: 0.16.0
Args:
name (str):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
pkgs (list):
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Kwargs:
version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.
saltenv (str): Salt environment. Default ``base``
refresh (bool): Refresh package metadata. Default ``False``
Returns:
dict: Returns a dict containing the changes.
If the package is removed by ``pkg.remove``:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If the package is already uninstalled:
{'<package>': {'current': 'not installed'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
### Response:
def remove(name=None, pkgs=None, **kwargs):
'''
Remove the passed package(s) from the system using winrepo
.. versionadded:: 0.16.0
Args:
name (str):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
pkgs (list):
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Kwargs:
version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.
saltenv (str): Salt environment. Default ``base``
refresh (bool): Refresh package metadata. Default ``False``
Returns:
dict: Returns a dict containing the changes.
If the package is removed by ``pkg.remove``:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If the package is already uninstalled:
{'<package>': {'current': 'not installed'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
saltenv = kwargs.get('saltenv', 'base')
refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
# no need to call _refresh_db_conditional as list_pkgs will do it
ret = {}
# Make sure name or pkgs is passed
if not name and not pkgs:
return 'Must pass a single package or a list of packages'
# Get package parameters
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
# Get a list of currently installed software for comparison at the end
old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)
# Loop through each package
changed = [] # list of changed package names
for pkgname, version_num in six.iteritems(pkg_params):
# Load package information for the package
pkginfo = _get_package_info(pkgname, saltenv=saltenv)
# Make sure pkginfo was found
if not pkginfo:
msg = 'Unable to locate package {0}'.format(pkgname)
log.error(msg)
ret[pkgname] = msg
continue
# Check to see if package is installed on the system
if pkgname not in old:
log.debug('%s %s not installed', pkgname, version_num if version_num else '')
ret[pkgname] = {'current': 'not installed'}
continue
removal_targets = []
# Only support a single version number
if version_num is not None:
# Using the salt cmdline with version=5.3 might be interpreted
# as a float it must be converted to a string in order for
# string matching to work.
version_num = six.text_type(version_num)
# At least one version of the software is installed.
if version_num is None:
for ver_install in old[pkgname]:
if ver_install not in pkginfo and 'latest' in pkginfo:
log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
removal_targets.append('latest')
else:
removal_targets.append(ver_install)
else:
if version_num in pkginfo:
# we known how to remove this version
if version_num in old[pkgname]:
removal_targets.append(version_num)
else:
log.debug('%s %s not installed', pkgname, version_num)
ret[pkgname] = {'current': '{0} not installed'.format(version_num)}
continue
elif 'latest' in pkginfo:
# we do not have version entry, assume software can self upgrade and use latest
log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
removal_targets.append('latest')
if not removal_targets:
log.error('%s %s no definition to remove this version', pkgname, version_num)
ret[pkgname] = {
'current': '{0} no definition, cannot removed'.format(version_num)
}
continue
for target in removal_targets:
# Get the uninstaller
uninstaller = pkginfo[target].get('uninstaller', '')
cache_dir = pkginfo[target].get('cache_dir', False)
uninstall_flags = pkginfo[target].get('uninstall_flags', '')
# If no uninstaller found, use the installer with uninstall flags
if not uninstaller and uninstall_flags:
uninstaller = pkginfo[target].get('installer', '')
# If still no uninstaller found, fail
if not uninstaller:
log.error(
'No installer or uninstaller configured for package %s',
pkgname,
)
ret[pkgname] = {'no uninstaller defined': target}
continue
# Where is the uninstaller
if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):
# Check for the 'cache_dir' parameter in the .sls file
# If true, the entire directory will be cached instead of the
# individual file. This is useful for installations that are not
# single files
if cache_dir and uninstaller.startswith('salt:'):
path, _ = os.path.split(uninstaller)
__salt__['cp.cache_dir'](path,
saltenv,
False,
None,
                                             'E@init.sls$')
# Check to see if the uninstaller is cached
cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv)
if not cached_pkg:
# It's not cached. Cache it, mate.
cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv)
# Check if the uninstaller was cached successfully
if not cached_pkg:
log.error('Unable to cache %s', uninstaller)
ret[pkgname] = {'unable to cache': uninstaller}
continue
# Compare the hash of the cached installer to the source only if
# the file is hosted on salt:
# TODO cp.cache_file does cache and hash checking? So why do it again?
if uninstaller.startswith('salt:'):
if __salt__['cp.hash_file'](uninstaller, saltenv) != \
__salt__['cp.hash_file'](cached_pkg):
try:
cached_pkg = __salt__['cp.cache_file'](
uninstaller, saltenv)
except MinionError as exc:
return '{0}: {1}'.format(exc, uninstaller)
# Check if the installer was cached successfully
if not cached_pkg:
log.error('Unable to cache %s', uninstaller)
ret[pkgname] = {'unable to cache': uninstaller}
continue
else:
# Run the uninstaller directly
# (not hosted on salt:, https:, etc.)
cached_pkg = os.path.expandvars(uninstaller)
# Fix non-windows slashes
cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg)
# os.path.expandvars is not required as we run everything through cmd.exe /s /c
if kwargs.get('extra_uninstall_flags'):
uninstall_flags = '{0} {1}'.format(
uninstall_flags, kwargs.get('extra_uninstall_flags', ''))
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
# Check if uninstaller is set to {guid}, if not we assume its a remote msi file.
# which has already been downloaded.
arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
else:
arguments = '"{0}"'.format(cached_pkg)
if uninstall_flags:
arguments = '{0} {1}'.format(arguments, uninstall_flags)
# Uninstall the software
changed.append(pkgname)
# Check Use Scheduler Option
if pkginfo[target].get('use_scheduler', False):
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd_shell,
arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00',
ac_only=False,
stop_if_on_batteries=False)
# Run Scheduled Task
if not __salt__['task.run_wait'](name='update-salt-software'):
log.error('Failed to remove %s', pkgname)
log.error('Scheduled Task failed to run')
ret[pkgname] = {'uninstall status': 'failed'}
else:
# Launch the command
result = __salt__['cmd.run_all'](
'"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
ret[pkgname] = {'uninstall status': 'success'}
changed.append(pkgname)
elif result['retcode'] == 3010:
# 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
report_reboot_exit_codes = kwargs.pop(
'report_reboot_exit_codes', True)
if report_reboot_exit_codes:
__salt__['system.set_reboot_required_witnessed']()
ret[pkgname] = {'uninstall status': 'success, reboot required'}
changed.append(pkgname)
elif result['retcode'] == 1641:
# 1641 is ERROR_SUCCESS_REBOOT_INITIATED
ret[pkgname] = {'uninstall status': 'success, reboot initiated'}
changed.append(pkgname)
else:
log.error('Failed to remove %s', pkgname)
log.error('retcode %s', result['retcode'])
log.error('uninstaller output: %s', result['stdout'])
ret[pkgname] = {'uninstall status': 'failed'}
# Get a new list of installed software
new = list_pkgs(saltenv=saltenv, refresh=False)
# Take the "old" package list and convert the values to strings in
# preparation for the comparison below.
__salt__['pkg_resource.stringify'](old)
# Check for changes in the registry
difference = salt.utils.data.compare_dicts(old, new)
found_chgs = all(name in difference for name in changed)
end_t = time.time() + 3 # give it 3 seconds to catch up.
while not found_chgs and time.time() < end_t:
time.sleep(0.5)
new = list_pkgs(saltenv=saltenv, refresh=False)
difference = salt.utils.data.compare_dicts(old, new)
found_chgs = all(name in difference for name in changed)
if not found_chgs:
log.warning('Expected changes for package removal may not have occured')
# Compare the software list before and after
# Add the difference to ret
ret.update(difference)
return ret |
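The retcode branch at the end of the uninstall loop is the part most worth internalising; the standalone sketch below mirrors it without Salt, using the documented Windows Installer codes (0 success, 3010 ERROR_SUCCESS_REBOOT_REQUIRED, 1641 ERROR_SUCCESS_REBOOT_INITIATED, anything else failed).
def uninstall_status(retcode):
    # Mirrors the cmd.run_all handling above, minus the scheduler path.
    if retcode == 0:
        return 'success'
    if retcode == 3010:
        return 'success, reboot required'
    if retcode == 1641:
        return 'success, reboot initiated'
    return 'failed'
assert uninstall_status(0) == 'success'
assert uninstall_status(3010) == 'success, reboot required'
assert uninstall_status(1641) == 'success, reboot initiated'
assert uninstall_status(1603) == 'failed'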
def drop_database(self, name, force=False):
"""
Drop an MapD database
Parameters
----------
name : string
Database name
force : boolean, default False
If False and there are any tables in this database, raises an
IntegrityError
"""
tables = []
if not force or self.database(name):
tables = self.list_tables(database=name)
if not force and len(tables):
raise com.IntegrityError(
'Database {0} must be empty before being dropped, or set '
'force=True'.format(name)
)
statement = ddl.DropDatabase(name)
self._execute(statement) | Drop an MapD database
Parameters
----------
name : string
Database name
force : boolean, default False
If False and there are any tables in this database, raises an
IntegrityError | Below is the the instruction that describes the task:
### Input:
Drop an MapD database
Parameters
----------
name : string
Database name
force : boolean, default False
If False and there are any tables in this database, raises an
IntegrityError
### Response:
def drop_database(self, name, force=False):
"""
Drop an MapD database
Parameters
----------
name : string
Database name
force : boolean, default False
If False and there are any tables in this database, raises an
IntegrityError
"""
tables = []
if not force or self.database(name):
tables = self.list_tables(database=name)
if not force and len(tables):
raise com.IntegrityError(
'Database {0} must be empty before being dropped, or set '
'force=True'.format(name)
)
statement = ddl.DropDatabase(name)
self._execute(statement) |
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None,
max_content_length = None,
http_method = "http", fields=None,
conditions=None):
"""
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the
actual form. This does not return the file input field which also
needs to be added
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: :class:`boto.s3.acl.ACL`
:param acl: ACL rule to use, if any
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
}
"""
if fields == None:
fields = []
if conditions == None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({ "name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add signature for encoded policy document as the 'AWSAccessKeyId' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields} | Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the
actual form. This does not return the file input field which also
needs to be added
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: :class:`boto.s3.acl.ACL`
:param acl: ACL rule to use, if any
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
} | Below is the the instruction that describes the task:
### Input:
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the
actual form. This does not return the file input field which also
needs to be added
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: :class:`boto.s3.acl.ACL`
:param acl: ACL rule to use, if any
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
}
### Response:
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None,
max_content_length = None,
http_method = "http", fields=None,
conditions=None):
"""
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the
actual form. This does not return the file input field which also
needs to be added
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: :class:`boto.s3.acl.ACL`
:param acl: ACL rule to use, if any
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
}
"""
if fields == None:
fields = []
if conditions == None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({ "name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add signature for encoded policy document as the 'AWSAccessKeyId' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields} |
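The method deliberately returns only the form arguments; rendering them into an HTML form (and adding the file input it omits) is left to the caller. The sketch below does that with a hand-written sample of the returned structure rather than a live S3 call, so every value is a placeholder.
form_args = {
    'action': 'http://mybucket.s3.amazonaws.com/',    # placeholder bucket URL
    'fields': [
        {'name': 'key', 'value': 'uploads/${filename}'},
        {'name': 'AWSAccessKeyId', 'value': 'AKIDEXAMPLE'},
        {'name': 'policy', 'value': 'BASE64POLICY'},
        {'name': 'signature', 'value': 'SIGNATUREVALUE'},
    ],
}
hidden = '\n'.join(
    '  <input type="hidden" name="%(name)s" value="%(value)s">' % field
    for field in form_args['fields']
)
form = (
    '<form action="%s" method="post" enctype="multipart/form-data">\n'
    '%s\n'
    '  <input type="file" name="file">\n'
    '  <input type="submit" value="Upload">\n'
    '</form>'
) % (form_args['action'], hidden)
print(form)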
def structured_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
s = raw_partlist(input=input, timeout=timeout, showgui=showgui)
return parse_partlist(s) | export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..]) | Below is the the instruction that describes the task:
### Input:
export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
### Response:
def structured_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
s = raw_partlist(input=input, timeout=timeout, showgui=showgui)
return parse_partlist(s) |
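Running structured_partlist needs an EAGLE installation, so the sketch below only shows how the (header, rows) return value is typically consumed, with a hand-written sample standing in for real output.
header = ['part', 'value', 'device', 'package']       # sample shape only
rows = [
    {'part': 'C1', 'value': '1n', 'device': 'C-EU', 'package': 'C0805'},
    {'part': 'C2', 'value': '1n', 'device': 'C-EU', 'package': 'C0805'},
    {'part': 'R1', 'value': '10k', 'device': 'R-EU', 'package': 'R0805'},
]
by_value = {}
for row in rows:
    by_value.setdefault(row['value'], []).append(row['part'])
assert by_value == {'1n': ['C1', 'C2'], '10k': ['R1']}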
def get(self, exchange, virtual_host='/'):
"""Get Exchange details.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_EXCHANGE
% (
virtual_host,
exchange)
) | Get Exchange details.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get Exchange details.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
### Response:
def get(self, exchange, virtual_host='/'):
"""Get Exchange details.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_EXCHANGE
% (
virtual_host,
exchange)
) |
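A hedged usage sketch: it assumes the surrounding class is amqpstorm's management client and that a RabbitMQ broker with the management plugin is reachable on the default port with default credentials; treat it as illustrative otherwise.
from amqpstorm.management import ManagementApi
client = ManagementApi('http://127.0.0.1:15672', 'guest', 'guest')
exchange = client.exchange.get('amq.direct', virtual_host='/')
print(exchange.get('type'), exchange.get('durable'))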
def set_min(self, fmin):
"""
Updates minimum value
"""
if round(100000*fmin) != 100000*fmin:
raise DriverError('utils.widgets.Expose.set_min: ' +
'fmin must be a multiple of 0.00001')
self.fmin = fmin
self.set(self.fmin) | Updates minimum value | Below is the the instruction that describes the task:
### Input:
Updates minimum value
### Response:
def set_min(self, fmin):
"""
Updates minimum value
"""
if round(100000*fmin) != 100000*fmin:
raise DriverError('utils.widgets.Expose.set_min: ' +
'fmin must be a multiple of 0.00001')
self.fmin = fmin
self.set(self.fmin) |
def inspect_partitions(bucket):
"""Discover the partitions on a bucket via introspection.
For large buckets which lack s3 inventories, salactus will attempt
to process objects in parallel on the bucket by breaking the bucket
into a separate keyspace partitions. It does this with a heurestic
that attempts to sample the keyspace and determine appropriate subparts.
This command provides additional visibility into the partitioning of
a bucket by showing how salactus would partition a given bucket.
"""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(level=logging.WARNING)
state = db.db()
# add db.bucket accessor
found = None
for b in state.buckets():
if b.name == bucket:
found = b
break
if not found:
click.echo("no bucket named: %s" % bucket)
return
keyset = []
partitions = []
def process_keyset(bid, page):
keyset.append(len(page))
def process_bucket_iterator(bid, prefix, delimiter="", **continuation):
partitions.append(prefix)
# synchronous execution
def invoke(f, *args, **kw):
return f(*args, **kw)
# unleash the monkies ;-)
worker.connection.hincrby = lambda x, y, z: True
worker.invoke = invoke
worker.process_keyset = process_keyset
worker.process_bucket_iterator = process_bucket_iterator
# kick it off
worker.process_bucket_partitions(b.bucket_id)
keys_scanned = sum(keyset)
click.echo(
"Found %d partitions %s keys scanned during partitioning" % (
len(partitions), keys_scanned))
click.echo("\n".join(partitions)) | Discover the partitions on a bucket via introspection.
For large buckets which lack s3 inventories, salactus will attempt
to process objects in parallel on the bucket by breaking the bucket
into a separate keyspace partitions. It does this with a heurestic
that attempts to sample the keyspace and determine appropriate subparts.
This command provides additional visibility into the partitioning of
a bucket by showing how salactus would partition a given bucket. | Below is the the instruction that describes the task:
### Input:
Discover the partitions on a bucket via introspection.
For large buckets which lack s3 inventories, salactus will attempt
to process objects in parallel on the bucket by breaking the bucket
into a separate keyspace partitions. It does this with a heurestic
that attempts to sample the keyspace and determine appropriate subparts.
This command provides additional visibility into the partitioning of
a bucket by showing how salactus would partition a given bucket.
### Response:
def inspect_partitions(bucket):
"""Discover the partitions on a bucket via introspection.
For large buckets which lack s3 inventories, salactus will attempt
to process objects in parallel on the bucket by breaking the bucket
into a separate keyspace partitions. It does this with a heurestic
that attempts to sample the keyspace and determine appropriate subparts.
This command provides additional visibility into the partitioning of
a bucket by showing how salactus would partition a given bucket.
"""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(level=logging.WARNING)
state = db.db()
# add db.bucket accessor
found = None
for b in state.buckets():
if b.name == bucket:
found = b
break
if not found:
click.echo("no bucket named: %s" % bucket)
return
keyset = []
partitions = []
def process_keyset(bid, page):
keyset.append(len(page))
def process_bucket_iterator(bid, prefix, delimiter="", **continuation):
partitions.append(prefix)
# synchronous execution
def invoke(f, *args, **kw):
return f(*args, **kw)
# unleash the monkies ;-)
worker.connection.hincrby = lambda x, y, z: True
worker.invoke = invoke
worker.process_keyset = process_keyset
worker.process_bucket_iterator = process_bucket_iterator
# kick it off
worker.process_bucket_partitions(b.bucket_id)
keys_scanned = sum(keyset)
click.echo(
"Found %d partitions %s keys scanned during partitioning" % (
len(partitions), keys_scanned))
click.echo("\n".join(partitions)) |
def _get_format_name_loader_mapping(self):
"""
:return: Mappings of format-name and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"excel": ExcelTableFileLoader,
"json_lines": JsonLinesTableTextLoader,
"markdown": MarkdownTableTextLoader,
"mediawiki": MediaWikiTableTextLoader,
"ssv": CsvTableFileLoader,
}
)
return loader_table | :return: Mappings of format-name and loader class.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
:return: Mappings of format-name and loader class.
:rtype: dict
### Response:
def _get_format_name_loader_mapping(self):
"""
:return: Mappings of format-name and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"excel": ExcelTableFileLoader,
"json_lines": JsonLinesTableTextLoader,
"markdown": MarkdownTableTextLoader,
"mediawiki": MediaWikiTableTextLoader,
"ssv": CsvTableFileLoader,
}
)
return loader_table |
def user_packages(
self,
login=None,
platform=None,
package_type=None,
type_=None,
access=None):
'''
Returns a list of packages for a given user and optionally filter
by `platform`, `package_type` and `type_`.
:param login: (optional) the login name of the user or None. If login
is None this method will return the packages for the
authenticated user.
:param platform: only find packages that include files for this platform.
(e.g. 'linux-64', 'osx-64', 'win-32')
:param package_type: only find packages that have this kind of file
(e.g. 'env', 'conda', 'pypi')
:param type_: only find packages that have this conda `type`
(i.e. 'app')
:param access: only find packages that have this access level
(e.g. 'private', 'authenticated', 'public')
'''
if login:
url = '{0}/packages/{1}'.format(self.domain, login)
else:
url = '{0}/packages'.format(self.domain)
arguments = collections.OrderedDict()
if platform:
arguments['platform'] = platform
if package_type:
arguments['package_type'] = package_type
if type_:
arguments['type'] = type_
if access:
arguments['access'] = access
res = self.session.get(url, params=arguments)
self._check_response(res)
return res.json() | Returns a list of packages for a given user and optionally filter
by `platform`, `package_type` and `type_`.
:param login: (optional) the login name of the user or None. If login
is None this method will return the packages for the
authenticated user.
:param platform: only find packages that include files for this platform.
(e.g. 'linux-64', 'osx-64', 'win-32')
:param package_type: only find packages that have this kind of file
(e.g. 'env', 'conda', 'pypi')
:param type_: only find packages that have this conda `type`
(i.e. 'app')
:param access: only find packages that have this access level
(e.g. 'private', 'authenticated', 'public') | Below is the the instruction that describes the task:
### Input:
Returns a list of packages for a given user and optionally filter
by `platform`, `package_type` and `type_`.
:param login: (optional) the login name of the user or None. If login
is None this method will return the packages for the
authenticated user.
:param platform: only find packages that include files for this platform.
(e.g. 'linux-64', 'osx-64', 'win-32')
:param package_type: only find packages that have this kind of file
(e.g. 'env', 'conda', 'pypi')
:param type_: only find packages that have this conda `type`
(i.e. 'app')
:param access: only find packages that have this access level
(e.g. 'private', 'authenticated', 'public')
### Response:
def user_packages(
self,
login=None,
platform=None,
package_type=None,
type_=None,
access=None):
'''
Returns a list of packages for a given user and optionally filter
by `platform`, `package_type` and `type_`.
:param login: (optional) the login name of the user or None. If login
is None this method will return the packages for the
authenticated user.
:param platform: only find packages that include files for this platform.
(e.g. 'linux-64', 'osx-64', 'win-32')
:param package_type: only find packages that have this kind of file
(e.g. 'env', 'conda', 'pypi')
:param type_: only find packages that have this conda `type`
(i.e. 'app')
:param access: only find packages that have this access level
(e.g. 'private', 'authenticated', 'public')
'''
if login:
url = '{0}/packages/{1}'.format(self.domain, login)
else:
url = '{0}/packages'.format(self.domain)
arguments = collections.OrderedDict()
if platform:
arguments['platform'] = platform
if package_type:
arguments['package_type'] = package_type
if type_:
arguments['type'] = type_
if access:
arguments['access'] = access
res = self.session.get(url, params=arguments)
self._check_response(res)
return res.json() |
def timer(self, stat, tags=None):
"""Contextmanager for easily computing timings.
:arg string stat: A period delimited alphanumeric key.
:arg list-of-strings tags: Each string in the tag consists of a key and
a value separated by a colon. Tags can make it easier to break down
metrics for analysis.
For example ``['env:stage', 'compressed:yes']``.
For example:
>>> mymetrics = get_metrics(__name__)
>>> def long_function():
... with mymetrics.timer('long_function'):
... # perform some thing we want to keep metrics on
... pass
.. Note::
All timings generated with this are in milliseconds.
"""
if six.PY3:
start_time = time.perf_counter()
else:
start_time = time.time()
yield
if six.PY3:
end_time = time.perf_counter()
else:
end_time = time.time()
delta = end_time - start_time
self.timing(stat, value=delta * 1000.0, tags=tags) | Contextmanager for easily computing timings.
:arg string stat: A period delimited alphanumeric key.
:arg list-of-strings tags: Each string in the tag consists of a key and
a value separated by a colon. Tags can make it easier to break down
metrics for analysis.
For example ``['env:stage', 'compressed:yes']``.
For example:
>>> mymetrics = get_metrics(__name__)
>>> def long_function():
... with mymetrics.timer('long_function'):
... # perform some thing we want to keep metrics on
... pass
.. Note::
All timings generated with this are in milliseconds. | Below is the the instruction that describes the task:
### Input:
Contextmanager for easily computing timings.
:arg string stat: A period delimited alphanumeric key.
:arg list-of-strings tags: Each string in the tag consists of a key and
a value separated by a colon. Tags can make it easier to break down
metrics for analysis.
For example ``['env:stage', 'compressed:yes']``.
For example:
>>> mymetrics = get_metrics(__name__)
>>> def long_function():
... with mymetrics.timer('long_function'):
... # perform some thing we want to keep metrics on
... pass
.. Note::
All timings generated with this are in milliseconds.
### Response:
def timer(self, stat, tags=None):
"""Contextmanager for easily computing timings.
:arg string stat: A period delimited alphanumeric key.
:arg list-of-strings tags: Each string in the tag consists of a key and
a value separated by a colon. Tags can make it easier to break down
metrics for analysis.
For example ``['env:stage', 'compressed:yes']``.
For example:
>>> mymetrics = get_metrics(__name__)
>>> def long_function():
... with mymetrics.timer('long_function'):
... # perform some thing we want to keep metrics on
... pass
.. Note::
All timings generated with this are in milliseconds.
"""
if six.PY3:
start_time = time.perf_counter()
else:
start_time = time.time()
yield
if six.PY3:
end_time = time.perf_counter()
else:
end_time = time.time()
delta = end_time - start_time
self.timing(stat, value=delta * 1000.0, tags=tags) |
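For a dependency-free variant of the same pattern, contextlib plus time.perf_counter gives an equivalent millisecond timer; this sketch only prints instead of emitting a metric, so it is illustrative rather than a drop-in replacement.
import contextlib
import time
@contextlib.contextmanager
def timer(stat):
    start = time.perf_counter()
    yield
    delta_ms = (time.perf_counter() - start) * 1000.0
    print('%s: %.3f ms' % (stat, delta_ms))
with timer('long_function'):
    sum(range(100000))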