code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def take(attributes, properties):
"""Returns a property set which includes all
properties in 'properties' that have any of 'attributes'."""
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result | Returns a property set which includes all
properties in 'properties' that have any of 'attributes'. | Below is the instruction that describes the task:
### Input:
Returns a property set which includes all
properties in 'properties' that have any of 'attributes'.
### Response:
def take(attributes, properties):
"""Returns a property set which includes all
properties in 'properties' that have any of 'attributes'."""
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result |
def unzip(file_obj):
"""
Take a path to a zipfile and checks if it is a valid zip file
and returns...
"""
files = []
# TODO: implement try-except here
zip = ZipFile(file_obj)
bad_file = zip.testzip()
if bad_file:
raise Exception('"%s" in the .zip archive is corrupt.' % bad_file)
infolist = zip.infolist()
for zipinfo in infolist:
if zipinfo.filename.startswith('__'): # do not process meta files
continue
file_obj = SimpleUploadedFile(name=zipinfo.filename, content=zip.read(zipinfo))
files.append((file_obj, zipinfo.filename))
zip.close()
return files | Take a path to a zipfile and checks if it is a valid zip file
and returns... | Below is the instruction that describes the task:
### Input:
Take a path to a zipfile and checks if it is a valid zip file
and returns...
### Response:
def unzip(file_obj):
"""
Take a path to a zipfile and checks if it is a valid zip file
and returns...
"""
files = []
# TODO: implement try-except here
zip = ZipFile(file_obj)
bad_file = zip.testzip()
if bad_file:
raise Exception('"%s" in the .zip archive is corrupt.' % bad_file)
infolist = zip.infolist()
for zipinfo in infolist:
if zipinfo.filename.startswith('__'): # do not process meta files
continue
file_obj = SimpleUploadedFile(name=zipinfo.filename, content=zip.read(zipinfo))
files.append((file_obj, zipinfo.filename))
zip.close()
return files |
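A minimal usage sketch for the helper above, assuming a Django environment (which provides SimpleUploadedFile) and a local archive.zip; the file name and the printed attribute are illustrative assumptions, not part of the original code:

with open('archive.zip', 'rb') as fh:
    for uploaded, name in unzip(fh):
        # each archive member arrives as an in-memory SimpleUploadedFile
        print(name, uploaded.size)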
def keyPressEvent(self, event):
"""Qt Override."""
key = event.key()
if key in [Qt.Key_Up]:
self._parent.previous_row()
elif key in [Qt.Key_Down]:
self._parent.next_row()
elif key in [Qt.Key_Enter, Qt.Key_Return]:
self._parent.show_editor()
else:
super(ShortcutFinder, self).keyPressEvent(event) | Qt Override. | Below is the instruction that describes the task:
### Input:
Qt Override.
### Response:
def keyPressEvent(self, event):
"""Qt Override."""
key = event.key()
if key in [Qt.Key_Up]:
self._parent.previous_row()
elif key in [Qt.Key_Down]:
self._parent.next_row()
elif key in [Qt.Key_Enter, Qt.Key_Return]:
self._parent.show_editor()
else:
super(ShortcutFinder, self).keyPressEvent(event) |
def is_email(potential_email_address):
"""
Check if potential_email_address is a valid e-mail address.
Please note that this function has no false-negatives but many
false-positives. So if it returns that the input is not a valid
e-mail address, it certainly isn't. If it returns True, it might still be
invalid. For example, the domain could not be registered.
Parameters
----------
potential_email_address : str
Returns
-------
is_email : bool
Examples
--------
>>> is_email('')
False
>>> is_email('[email protected]')
True
>>> is_email('[email protected]')
True
>>> is_email('Martin Thoma <[email protected]>')
False
>>> is_email('info@martin-thoma')
False
"""
context, mail = parseaddr(potential_email_address)
first_condition = len(context) == 0 and len(mail) != 0
dot_after_at = ('@' in potential_email_address and
'.' in potential_email_address.split('@')[1])
return first_condition and dot_after_at | Check if potential_email_address is a valid e-mail address.
Please note that this function has no false-negatives but many
false-positives. So if it returns that the input is not a valid
e-mail address, it certainly isn't. If it returns True, it might still be
invalid. For example, the domain could not be registered.
Parameters
----------
potential_email_address : str
Returns
-------
is_email : bool
Examples
--------
>>> is_email('')
False
>>> is_email('[email protected]')
True
>>> is_email('[email protected]')
True
>>> is_email('Martin Thoma <[email protected]>')
False
>>> is_email('info@martin-thoma')
False | Below is the instruction that describes the task:
### Input:
Check if potential_email_address is a valid e-mail address.
Please note that this function has no false-negatives but many
false-positives. So if it returns that the input is not a valid
e-mail address, it certainly isn't. If it returns True, it might still be
invalid. For example, the domain could not be registered.
Parameters
----------
potential_email_address : str
Returns
-------
is_email : bool
Examples
--------
>>> is_email('')
False
>>> is_email('[email protected]')
True
>>> is_email('[email protected]')
True
>>> is_email('Martin Thoma <[email protected]>')
False
>>> is_email('info@martin-thoma')
False
### Response:
def is_email(potential_email_address):
"""
Check if potential_email_address is a valid e-mail address.
Please note that this function has no false-negatives but many
false-positives. So if it returns that the input is not a valid
e-mail address, it certainly isn't. If it returns True, it might still be
invalid. For example, the domain could not be registered.
Parameters
----------
potential_email_address : str
Returns
-------
is_email : bool
Examples
--------
>>> is_email('')
False
>>> is_email('[email protected]')
True
>>> is_email('[email protected]')
True
>>> is_email('Martin Thoma <[email protected]>')
False
>>> is_email('info@martin-thoma')
False
"""
context, mail = parseaddr(potential_email_address)
first_condition = len(context) == 0 and len(mail) != 0
dot_after_at = ('@' in potential_email_address and
'.' in potential_email_address.split('@')[1])
return first_condition and dot_after_at |
def electron_shell_str(shell, shellidx=None):
'''Return a string representing the data for an electron shell
If shellidx (index of the shell) is not None, it will also be printed
'''
am = shell['angular_momentum']
amchar = lut.amint_to_char(am)
amchar = amchar.upper()
shellidx_str = ''
if shellidx is not None:
shellidx_str = 'Index {} '.format(shellidx)
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s = "Shell: {}Region: {}: AM: {}\n".format(shellidx_str, shell['region'], amchar)
s += "Function: {}\n".format(shell['function_type'])
s += write_matrix([exponents, *coefficients], point_places)
return s | Return a string representing the data for an electron shell
If shellidx (index of the shell) is not None, it will also be printed | Below is the instruction that describes the task:
### Input:
Return a string representing the data for an electron shell
If shellidx (index of the shell) is not None, it will also be printed
### Response:
def electron_shell_str(shell, shellidx=None):
'''Return a string representing the data for an electron shell
If shellidx (index of the shell) is not None, it will also be printed
'''
am = shell['angular_momentum']
amchar = lut.amint_to_char(am)
amchar = amchar.upper()
shellidx_str = ''
if shellidx is not None:
shellidx_str = 'Index {} '.format(shellidx)
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s = "Shell: {}Region: {}: AM: {}\n".format(shellidx_str, shell['region'], amchar)
s += "Function: {}\n".format(shell['function_type'])
s += write_matrix([exponents, *coefficients], point_places)
return s |
def get(self, campaign_id, nick=None):
'''xxxxx.xxxxx.campaign.area.get
===================================
Get the placement region settings of a promotion campaign.'''
request = TOPRequest('xxxxx.xxxxx.campaign.area.get')
request['campaign_id'] = campaign_id
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignArea})
return self.result | xxxxx.xxxxx.campaign.area.get
===================================
Get the placement region settings of a promotion campaign. | Below is the instruction that describes the task:
### Input:
xxxxx.xxxxx.campaign.area.get
===================================
Get the placement region settings of a promotion campaign.
### Response:
def get(self, campaign_id, nick=None):
'''xxxxx.xxxxx.campaign.area.get
===================================
Get the placement region settings of a promotion campaign.'''
request = TOPRequest('xxxxx.xxxxx.campaign.area.get')
request['campaign_id'] = campaign_id
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignArea})
return self.result |
def slamdunkOverallRatesPlot (self):
""" Generate the overall rates plot """
pconfig = {
'id': 'overallratesplot',
'title': 'Slamdunk: Overall conversion rates in reads',
'cpswitch': False,
'cpswitch_c_active': False,
'ylab': 'Number of reads',
'stacking': 'normal',
'tt_decimals': 2,
'tt_suffix': '%',
'tt_percentages': False,
'hide_zero_cats': False,
'data_labels': [
"Plus Strand +",
"Minus Strand -",
]
}
cats = [OrderedDict(), OrderedDict()]
keys = [
['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G'],
['A>G','A>T','A>C','T>A','T>G','T>C','G>A','G>T','G>C','C>A','C>T','C>G']
]
for i, k in enumerate(keys):
for j, v in enumerate(k):
cats[i][v] = { 'color': self.plot_cols[j] }
self.add_section (
name = 'Conversion rates per read',
anchor = 'slamdunk_overall_rates',
description = """This plot shows the individual conversion rates over all reads.
It shows these conversion rates strand-specific: This means for a properly labeled
sample you would see a T>C excess on the plus-strand and an A>G excess on the minus strand
(see the <a href="http://t-neumann.github.io/slamdunk/docs.html#rates" target="_blank">slamdunk docs</a>).""",
plot = bargraph.plot([self.rates_data_plus,self.rates_data_minus], cats, pconfig)
) | Generate the overall rates plot | Below is the instruction that describes the task:
### Input:
Generate the overall rates plot
### Response:
def slamdunkOverallRatesPlot (self):
""" Generate the overall rates plot """
pconfig = {
'id': 'overallratesplot',
'title': 'Slamdunk: Overall conversion rates in reads',
'cpswitch': False,
'cpswitch_c_active': False,
'ylab': 'Number of reads',
'stacking': 'normal',
'tt_decimals': 2,
'tt_suffix': '%',
'tt_percentages': False,
'hide_zero_cats': False,
'data_labels': [
"Plus Strand +",
"Minus Strand -",
]
}
cats = [OrderedDict(), OrderedDict()]
keys = [
['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G'],
['A>G','A>T','A>C','T>A','T>G','T>C','G>A','G>T','G>C','C>A','C>T','C>G']
]
for i, k in enumerate(keys):
for j, v in enumerate(k):
cats[i][v] = { 'color': self.plot_cols[j] }
self.add_section (
name = 'Conversion rates per read',
anchor = 'slamdunk_overall_rates',
description = """This plot shows the individual conversion rates over all reads.
It shows these conversion rates strand-specific: This means for a properly labeled
sample you would see a T>C excess on the plus-strand and an A>G excess on the minus strand
(see the <a href="http://t-neumann.github.io/slamdunk/docs.html#rates" target="_blank">slamdunk docs</a>).""",
plot = bargraph.plot([self.rates_data_plus,self.rates_data_minus], cats, pconfig)
) |
def _resolve_responses(self):
"""
_resolve_responses
"""
while True:
message = self.res_queue.get()
if message is None:
_logger.debug("_resolve_responses thread is terminated")
return
self.__resolve_responses(message) | _resolve_responses | Below is the instruction that describes the task:
### Input:
_resolve_responses
### Response:
def _resolve_responses(self):
"""
_resolve_responses
"""
while True:
message = self.res_queue.get()
if message is None:
_logger.debug("_resolve_responses thread is terminated")
return
self.__resolve_responses(message) |
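The method above is the standard sentinel-terminated queue-worker pattern: block on get(), treat None as a shutdown signal, and hand everything else to a handler. A self-contained sketch of the same pattern (all names below are illustrative, not taken from the original class):

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        msg = q.get()
        if msg is None:          # sentinel: terminate the worker thread
            return
        print('handled', msg)    # stand-in for the real response handler

t = threading.Thread(target=worker)
t.start()
q.put('a')
q.put('b')
q.put(None)                      # ask the worker to stop
t.join()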
def gen_select_list(sig_dic):
'''
For generating List view HTML file for SELECT.
for each item.
'''
view_jushi = '''<span class="label label-primary" style="margin-right:10px">'''
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''{{% if '{0}' in postinfo.extinfo and postinfo.extinfo["{0}"][0] == "{1}" %}}
{2} {{% end %}}'''.format(sig_dic['en'], key, dic_tmp[key])
view_jushi += tmp_str
view_jushi += '''</span>'''
return view_jushi | For generating List view HTML file for SELECT.
for each item. | Below is the instruction that describes the task:
### Input:
For generating List view HTML file for SELECT.
for each item.
### Response:
def gen_select_list(sig_dic):
'''
For generating List view HTML file for SELECT.
for each item.
'''
view_jushi = '''<span class="label label-primary" style="margin-right:10px">'''
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''{{% if '{0}' in postinfo.extinfo and postinfo.extinfo["{0}"][0] == "{1}" %}}
{2} {{% end %}}'''.format(sig_dic['en'], key, dic_tmp[key])
view_jushi += tmp_str
view_jushi += '''</span>'''
return view_jushi |
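Because the function only assembles a template string, its effect is easiest to see on a small, made-up sig_dic (the field name and choices below are hypothetical):

sig_dic = {'en': 'color', 'dic': {'red': 'Red', 'blue': 'Blue'}}
print(gen_select_list(sig_dic))
# Prints one <span> wrapping a template conditional per choice, roughly:
# {% if 'color' in postinfo.extinfo and postinfo.extinfo["color"][0] == "red" %} Red {% end %}
# {% if 'color' in postinfo.extinfo and postinfo.extinfo["color"][0] == "blue" %} Blue {% end %}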
def create(self, create_missing=None):
"""Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1235377
<https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_.
"""
return HostGroup(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1235377
<https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_. | Below is the instruction that describes the task:
### Input:
Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1235377
<https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_.
### Response:
def create(self, create_missing=None):
"""Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1235377
<https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_.
"""
return HostGroup(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() |
def _convert(self, format):
"""Return a new Image instance with the given format.
Returns self if the format is already the same.
"""
if self.format == format:
return self
else:
image = Image(self.pil_image)
image._format = format
return image | Return a new Image instance with the given format.
Returns self if the format is already the same. | Below is the instruction that describes the task:
### Input:
Return a new Image instance with the given format.
Returns self if the format is already the same.
### Response:
def _convert(self, format):
"""Return a new Image instance with the given format.
Returns self if the format is already the same.
"""
if self.format == format:
return self
else:
image = Image(self.pil_image)
image._format = format
return image |
def create_refresh_token(self, access_token_value):
# type: (str) -> str
"""
Creates a refresh token bound to the specified access token.
"""
if access_token_value not in self.access_tokens:
raise InvalidAccessToken('{} unknown'.format(access_token_value))
if not self.refresh_token_lifetime:
logger.debug('no refresh token issued for access_token=%s', access_token_value)
return None
refresh_token = rand_str()
authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime}
self.refresh_tokens[refresh_token] = authz_info
logger.debug('issued refresh_token=%s expiring=%d for access_token=%s', refresh_token, authz_info['exp'],
access_token_value)
return refresh_token | Creates a refresh token bound to the specified access token. | Below is the instruction that describes the task:
### Input:
Creates a refresh token bound to the specified access token.
### Response:
def create_refresh_token(self, access_token_value):
# type: (str) -> str
"""
Creates a refresh token bound to the specified access token.
"""
if access_token_value not in self.access_tokens:
raise InvalidAccessToken('{} unknown'.format(access_token_value))
if not self.refresh_token_lifetime:
logger.debug('no refresh token issued for access_token=%s', access_token_value)
return None
refresh_token = rand_str()
authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime}
self.refresh_tokens[refresh_token] = authz_info
logger.debug('issued refresh_token=%s expiring=%d for access_token=%s', refresh_token, authz_info['exp'],
access_token_value)
return refresh_token |
def get(self, nicks=[], fields=[]):
'''taobao.users.get: get information for multiple users'''
request = TOPRequest('taobao.users.get')
request['nicks'] = ','.join(nicks)
if not fields:
user = User()
fields = user.fields
request['fields'] = ','.join(fields)
self.create(self.execute(request))
return self.users | taobao.users.get: get information for multiple users | Below is the instruction that describes the task:
### Input:
taobao.users.get: get information for multiple users
### Response:
def get(self, nicks=[], fields=[]):
'''taobao.users.get: get information for multiple users'''
request = TOPRequest('taobao.users.get')
request['nicks'] = ','.join(nicks)
if not fields:
user = User()
fields = user.fields
request['fields'] = ','.join(fields)
self.create(self.execute(request))
return self.users |
def xform(self, left, right, repeating, base, sign):
"""
Return prefixes for tuple.
:param str left: left of the radix
:param str right: right of the radix
:param str repeating: repeating part
:param int base: the base in which value is displayed
:param int sign: -1, 0, 1 as appropriate
:returns: the number string
:rtype: str
"""
# pylint: disable=too-many-arguments
base_prefix = ''
if self.CONFIG.use_prefix:
if base == 8:
base_prefix = '0'
elif base == 16:
base_prefix = '0x'
else:
base_prefix = ''
base_subscript = str(base) if self.CONFIG.use_subscript else ''
result = {
'sign' : '-' if sign == -1 else '',
'base_prefix' : base_prefix,
'left' : left,
'radix' : '.' if (right != "" or repeating != "") else "",
'right' : right,
'repeating' : ("(%s)" % repeating) if repeating != "" else "",
'base_separator' : '' if base_subscript == '' else '_',
'base_subscript' : base_subscript
}
return self._FMT_STR % result | Return prefixes for tuple.
:param str left: left of the radix
:param str right: right of the radix
:param str repeating: repeating part
:param int base: the base in which value is displayed
:param int sign: -1, 0, 1 as appropriate
:returns: the number string
:rtype: str | Below is the instruction that describes the task:
### Input:
Return prefixes for tuple.
:param str left: left of the radix
:param str right: right of the radix
:param str repeating: repeating part
:param int base: the base in which value is displayed
:param int sign: -1, 0, 1 as appropriate
:returns: the number string
:rtype: str
### Response:
def xform(self, left, right, repeating, base, sign):
"""
Return prefixes for tuple.
:param str left: left of the radix
:param str right: right of the radix
:param str repeating: repeating part
:param int base: the base in which value is displayed
:param int sign: -1, 0, 1 as appropriate
:returns: the number string
:rtype: str
"""
# pylint: disable=too-many-arguments
base_prefix = ''
if self.CONFIG.use_prefix:
if base == 8:
base_prefix = '0'
elif base == 16:
base_prefix = '0x'
else:
base_prefix = ''
base_subscript = str(base) if self.CONFIG.use_subscript else ''
result = {
'sign' : '-' if sign == -1 else '',
'base_prefix' : base_prefix,
'left' : left,
'radix' : '.' if (right != "" or repeating != "") else "",
'right' : right,
'repeating' : ("(%s)" % repeating) if repeating != "" else "",
'base_separator' : '' if base_subscript == '' else '_',
'base_subscript' : base_subscript
}
return self._FMT_STR % result |
def write(self, f):
""" Write namespace as INI file.
:param f: File object or path to file.
"""
if isinstance(f, str):
f = io.open(f, 'w', encoding='utf-8')
if not hasattr(f, 'read'):
raise AttributeError("Wrong type of file: {0}".format(type(f)))
NS_LOGGER.info('Write to `{0}`'.format(f.name))
for section in self.sections.keys():
f.write('[{0}]\n'.format(section))
for k, v in self[section].items():
f.write('{0:15}= {1}\n'.format(k, v))
f.write('\n')
f.close() | Write namespace as INI file.
:param f: File object or path to file. | Below is the instruction that describes the task:
### Input:
Write namespace as INI file.
:param f: File object or path to file.
### Response:
def write(self, f):
""" Write namespace as INI file.
:param f: File object or path to file.
"""
if isinstance(f, str):
f = io.open(f, 'w', encoding='utf-8')
if not hasattr(f, 'read'):
raise AttributeError("Wrong type of file: {0}".format(type(f)))
NS_LOGGER.info('Write to `{0}`'.format(f.name))
for section in self.sections.keys():
f.write('[{0}]\n'.format(section))
for k, v in self[section].items():
f.write('{0:15}= {1}\n'.format(k, v))
f.write('\n')
f.close() |
def raw_reader(queue=None):
"""Returns a raw binary reader co-routine.
Args:
queue (Optional[BufferQueue]): The buffer read data for parsing, if ``None`` a
new one will be created.
Yields:
IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed
in the middle of a value or ``STREAM_END`` if there is no data **and** the parser
is not in the middle of parsing a value.
Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP``
to iterate over values, or ``DATA`` if the last event was a ``INCOMPLETE``
or ``STREAM_END`` event type.
``SKIP`` is only allowed within a container. A reader is *in* a container
when the ``CONTAINER_START`` event type is encountered and *not in* a container
when the ``CONTAINER_END`` event type for that container is encountered.
"""
if queue is None:
queue = BufferQueue()
ctx = _HandlerContext(
position=0,
limit=None,
queue=queue,
field_name=None,
annotations=None,
depth=0,
whence=None
)
return reader_trampoline(_container_handler(None, ctx)) | Returns a raw binary reader co-routine.
Args:
queue (Optional[BufferQueue]): The buffer read data for parsing, if ``None`` a
new one will be created.
Yields:
IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed
in the middle of a value or ``STREAM_END`` if there is no data **and** the parser
is not in the middle of parsing a value.
Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP``
to iterate over values, or ``DATA`` if the last event was a ``INCOMPLETE``
or ``STREAM_END`` event type.
``SKIP`` is only allowed within a container. A reader is *in* a container
when the ``CONTAINER_START`` event type is encountered and *not in* a container
when the ``CONTAINER_END`` event type for that container is encountered. | Below is the instruction that describes the task:
### Input:
Returns a raw binary reader co-routine.
Args:
queue (Optional[BufferQueue]): The buffer read data for parsing, if ``None`` a
new one will be created.
Yields:
IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed
in the middle of a value or ``STREAM_END`` if there is no data **and** the parser
is not in the middle of parsing a value.
Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP``
to iterate over values, or ``DATA`` if the last event was a ``INCOMPLETE``
or ``STREAM_END`` event type.
``SKIP`` is only allowed within a container. A reader is *in* a container
when the ``CONTAINER_START`` event type is encountered and *not in* a container
when the ``CONTAINER_END`` event type for that container is encountered.
### Response:
def raw_reader(queue=None):
"""Returns a raw binary reader co-routine.
Args:
queue (Optional[BufferQueue]): The buffer read data for parsing, if ``None`` a
new one will be created.
Yields:
IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed
in the middle of a value or ``STREAM_END`` if there is no data **and** the parser
is not in the middle of parsing a value.
Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP``
to iterate over values, or ``DATA`` if the last event was a ``INCOMPLETE``
or ``STREAM_END`` event type.
``SKIP`` is only allowed within a container. A reader is *in* a container
when the ``CONTAINER_START`` event type is encountered and *not in* a container
when the ``CONTAINER_END`` event type for that container is encountered.
"""
if queue is None:
queue = BufferQueue()
ctx = _HandlerContext(
position=0,
limit=None,
queue=queue,
field_name=None,
annotations=None,
depth=0,
whence=None
)
return reader_trampoline(_container_handler(None, ctx)) |
def _fetch(self, endpoint_name, **params):
"""
Wrapper for making an api request from giphy
"""
params['api_key'] = self.api_key
resp = requests.get(self._endpoint(endpoint_name), params=params)
resp.raise_for_status()
data = resp.json()
self._check_or_raise(data.get('meta', {}))
return data | Wrapper for making an api request from giphy | Below is the instruction that describes the task:
### Input:
Wrapper for making an api request from giphy
### Response:
def _fetch(self, endpoint_name, **params):
"""
Wrapper for making an api request from giphy
"""
params['api_key'] = self.api_key
resp = requests.get(self._endpoint(endpoint_name), params=params)
resp.raise_for_status()
data = resp.json()
self._check_or_raise(data.get('meta', {}))
return data |
def _set_clear_mpls_auto_bandwidth_statistics_lsp(self, v, load=False):
"""
Setter method for clear_mpls_auto_bandwidth_statistics_lsp, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_lsp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-lsp", rest_name="clear-mpls-auto-bandwidth-statistics-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """clear_mpls_auto_bandwidth_statistics_lsp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-lsp", rest_name="clear-mpls-auto-bandwidth-statistics-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__clear_mpls_auto_bandwidth_statistics_lsp = t
if hasattr(self, '_set'):
self._set() | Setter method for clear_mpls_auto_bandwidth_statistics_lsp, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_lsp() directly. | Below is the instruction that describes the task:
### Input:
Setter method for clear_mpls_auto_bandwidth_statistics_lsp, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_lsp() directly.
### Response:
def _set_clear_mpls_auto_bandwidth_statistics_lsp(self, v, load=False):
"""
Setter method for clear_mpls_auto_bandwidth_statistics_lsp, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_lsp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-lsp", rest_name="clear-mpls-auto-bandwidth-statistics-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """clear_mpls_auto_bandwidth_statistics_lsp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-lsp", rest_name="clear-mpls-auto-bandwidth-statistics-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__clear_mpls_auto_bandwidth_statistics_lsp = t
if hasattr(self, '_set'):
self._set() |
def libvlc_media_list_index_of_item(p_ml, p_md):
'''Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
'''
f = _Cfunctions.get('libvlc_media_list_index_of_item', None) or \
_Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None,
ctypes.c_int, MediaList, Media)
return f(p_ml, p_md) | Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance.
@return: position of media instance or -1 if media not found. | Below is the instruction that describes the task:
### Input:
Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
### Response:
def libvlc_media_list_index_of_item(p_ml, p_md):
'''Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
'''
f = _Cfunctions.get('libvlc_media_list_index_of_item', None) or \
_Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None,
ctypes.c_int, MediaList, Media)
return f(p_ml, p_md) |
def times(self, a):
""" Multiply. """
return Vector(self.x*a, self.y*a, self.z*a) | Multiply. | Below is the instruction that describes the task:
### Input:
Multiply.
### Response:
def times(self, a):
""" Multiply. """
return Vector(self.x*a, self.y*a, self.z*a) |
def calcWeightedAvg(data,weights):
'''
Generates a weighted average of simulated data. The Nth row of data is averaged
and then weighted by the Nth element of weights in an aggregate average.
Parameters
----------
data : numpy.array
An array of data with N rows of J floats
weights : numpy.array
A length N array of weights for the N rows of data.
Returns
-------
weighted_sum : float
The weighted sum of the data.
'''
data_avg = np.mean(data,axis=1)
weighted_sum = np.dot(data_avg,weights)
return weighted_sum | Generates a weighted average of simulated data. The Nth row of data is averaged
and then weighted by the Nth element of weights in an aggregate average.
Parameters
----------
data : numpy.array
An array of data with N rows of J floats
weights : numpy.array
A length N array of weights for the N rows of data.
Returns
-------
weighted_sum : float
The weighted sum of the data. | Below is the instruction that describes the task:
### Input:
Generates a weighted average of simulated data. The Nth row of data is averaged
and then weighted by the Nth element of weights in an aggregate average.
Parameters
----------
data : numpy.array
An array of data with N rows of J floats
weights : numpy.array
A length N array of weights for the N rows of data.
Returns
-------
weighted_sum : float
The weighted sum of the data.
### Response:
def calcWeightedAvg(data,weights):
'''
Generates a weighted average of simulated data. The Nth row of data is averaged
and then weighted by the Nth element of weights in an aggregate average.
Parameters
----------
data : numpy.array
An array of data with N rows of J floats
weights : numpy.array
A length N array of weights for the N rows of data.
Returns
-------
weighted_sum : float
The weighted sum of the data.
'''
data_avg = np.mean(data,axis=1)
weighted_sum = np.dot(data_avg,weights)
return weighted_sum |
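A quick self-contained check of the function above with a 2x2 array: the row averages are 1.5 and 3.5, so equal weights of 0.5 give 2.5.

import numpy as np

data = np.array([[1.0, 2.0],
                 [3.0, 4.0]])
weights = np.array([0.5, 0.5])
print(calcWeightedAvg(data, weights))   # 0.5*1.5 + 0.5*3.5 = 2.5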
def _make_like(self, column, format, value):
"""
make like condition
:param column: column object
:param format: '%_' '_%' '%_%'
:param value: column value
:return: condition object
"""
c = []
if format.startswith('%'):
c.append('%')
c.append(value)
if format.endswith('%'):
c.append('%')
return column.like(''.join(c)) | make like condition
:param column: column object
:param format: '%_' '_%' '%_%'
:param value: column value
:return: condition object | Below is the instruction that describes the task:
### Input:
make like condition
:param column: column object
:param format: '%_' '_%' '%_%'
:param value: column value
:return: condition object
### Response:
def _make_like(self, column, format, value):
"""
make like condition
:param column: column object
:param format: '%_' '_%' '%_%'
:param value: column value
:return: condition object
"""
c = []
if format.startswith('%'):
c.append('%')
c.append(value)
if format.endswith('%'):
c.append('%')
return column.like(''.join(c)) |
def build(self, **kw):
"""Actually build the node.
This is called by the Taskmaster after it's decided that the
Node is out-of-date and must be rebuilt, and after the prepare()
method has gotten everything, uh, prepared.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff
in built().
"""
try:
self.get_executor()(self, **kw)
except SCons.Errors.BuildError as e:
e.node = self
raise | Actually build the node.
This is called by the Taskmaster after it's decided that the
Node is out-of-date and must be rebuilt, and after the prepare()
method has gotten everything, uh, prepared.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff
in built(). | Below is the instruction that describes the task:
### Input:
Actually build the node.
This is called by the Taskmaster after it's decided that the
Node is out-of-date and must be rebuilt, and after the prepare()
method has gotten everything, uh, prepared.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff
in built().
### Response:
def build(self, **kw):
"""Actually build the node.
This is called by the Taskmaster after it's decided that the
Node is out-of-date and must be rebuilt, and after the prepare()
method has gotten everything, uh, prepared.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff
in built().
"""
try:
self.get_executor()(self, **kw)
except SCons.Errors.BuildError as e:
e.node = self
raise |
def niggli_reduce(lattice, eps=1e-5):
"""Run Niggli reduction
Args:
lattice: Lattice parameters in the form of
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
eps:
float: Tolerance to check if difference of norms of two basis
vectors is close to zero or not and if two basis vectors are
orthogonal by the value of dot product being close to zero or
not. The detail is shown at
https://atztogo.github.io/niggli/.
Returns:
if the Niggli reduction succeeded:
Reduced lattice parameters are given as a numpy 'double' array:
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
otherwise None is returned.
"""
_set_no_error()
niggli_lattice = np.array(np.transpose(lattice), dtype='double', order='C')
result = spg.niggli_reduce(niggli_lattice, float(eps))
_set_error_message()
if result == 0:
return None
else:
return np.array(np.transpose(niggli_lattice),
dtype='double', order='C') | Run Niggli reduction
Args:
lattice: Lattice parameters in the form of
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
eps:
float: Tolerance to check if difference of norms of two basis
vectors is close to zero or not and if two basis vectors are
orthogonal by the value of dot product being close to zero or
not. The detail is shown at
https://atztogo.github.io/niggli/.
Returns:
if the Niggli reduction succeeded:
Reduced lattice parameters are given as a numpy 'double' array:
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
otherwise None is returned. | Below is the instruction that describes the task:
### Input:
Run Niggli reduction
Args:
lattice: Lattice parameters in the form of
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
eps:
float: Tolerance to check if difference of norms of two basis
vectors is close to zero or not and if two basis vectors are
orthogonal by the value of dot product being close to zero or
not. The detail is shown at
https://atztogo.github.io/niggli/.
Returns:
if the Niggli reduction succeeded:
Reduced lattice parameters are given as a numpy 'double' array:
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
otherwise None is returned.
### Response:
def niggli_reduce(lattice, eps=1e-5):
"""Run Niggli reduction
Args:
lattice: Lattice parameters in the form of
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
eps:
float: Tolerance to check if difference of norms of two basis
vectors is close to zero or not and if two basis vectors are
orthogonal by the value of dot product being close to zero or
not. The detail is shown at
https://atztogo.github.io/niggli/.
Returns:
if the Niggli reduction succeeded:
Reduced lattice parameters are given as a numpy 'double' array:
[[a_x, a_y, a_z],
[b_x, b_y, b_z],
[c_x, c_y, c_z]]
otherwise None is returned.
"""
_set_no_error()
niggli_lattice = np.array(np.transpose(lattice), dtype='double', order='C')
result = spg.niggli_reduce(niggli_lattice, float(eps))
_set_error_message()
if result == 0:
return None
else:
return np.array(np.transpose(niggli_lattice),
dtype='double', order='C') |
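Assuming this is spglib's module-level niggli_reduce (the spg extension module and the error helpers suggest so), a usage sketch looks like the following; the skewed input cell is invented for illustration, and the reduced cell is expected to come back orthogonal up to ordering and sign conventions:

import numpy as np

lattice = [[4.0, 0.0, 0.0],
           [0.0, 4.0, 0.0],
           [4.0, 0.0, 4.0]]   # third vector = first vector + (0, 0, 4)
reduced = niggli_reduce(lattice, eps=1e-5)
if reduced is not None:
    print(np.round(reduced, 5))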
def conditional_committors(source, sink, waypoint, msm):
"""
Computes the conditional committors :math:`q^{ABC^+}` which are the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et. al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
See Also
--------
msmbuilder.tpt.fraction_visited : function
Calculate the fraction of visits to a waypoint from a given
source to a sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
Notes
-----
Employs dense linear algebra, memory use scales as N^2,
and cycle use scales as N^3
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
"""
# typecheck
for data in [source, sink, waypoint]:
if not isinstance(data, int):
raise ValueError("source, sink, and waypoint must be integers.")
if (source == waypoint) or (sink == waypoint) or (sink == source):
raise ValueError('source, sink, waypoint must all be disjoint!')
if hasattr(msm, 'all_transmats_'):
cond_committors = np.zeros(msm.all_transmats_.shape[:2])
for i, tprob in enumerate(msm.all_transmats_):
cond_committors[i, :] = _conditional_committors(source, sink,
waypoint, tprob)
return np.median(cond_committors, axis=0)
return _conditional_committors(source, sink, waypoint, msm.transmat_) | Computes the conditional committors :math:`q^{ABC^+}` which are the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et. al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
See Also
--------
msmbuilder.tpt.fraction_visited : function
Calculate the fraction of visits to a waypoint from a given
source to a sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
Notes
-----
Employs dense linear algebra, memory use scales as N^2,
and cycle use scales as N^3
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | Below is the instruction that describes the task:
### Input:
Computes the conditional committors :math:`q^{ABC^+}` which are the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et. al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
See Also
--------
msmbuilder.tpt.fraction_visited : function
Calculate the fraction of visits to a waypoint from a given
source to a sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
Notes
-----
Employs dense linear algebra, memory use scales as N^2,
and cycle use scales as N^3
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
### Response:
def conditional_committors(source, sink, waypoint, msm):
"""
Computes the conditional committors :math:`q^{ABC^+}` which are the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et. al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
See Also
--------
msmbuilder.tpt.fraction_visited : function
Calculate the fraction of visits to a waypoint from a given
source to a sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
Notes
-----
Employs dense linear algebra, memory use scales as N^2,
and cycle use scales as N^3
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
"""
# typecheck
for data in [source, sink, waypoint]:
if not isinstance(data, int):
raise ValueError("source, sink, and waypoint must be integers.")
if (source == waypoint) or (sink == waypoint) or (sink == source):
raise ValueError('source, sink, waypoint must all be disjoint!')
if hasattr(msm, 'all_transmats_'):
cond_committors = np.zeros(msm.all_transmats_.shape[:2])
for i, tprob in enumerate(msm.all_transmats_):
cond_committors[i, :] = _conditional_committors(source, sink,
waypoint, tprob)
return np.median(cond_committors, axis=0)
return _conditional_committors(source, sink, waypoint, msm.transmat_) |
def btc_witness_script_deserialize(_script):
"""
Given a hex-encoded serialized witness script, turn it into a witness stack
(i.e. an array of Nones, ints, and strings)
"""
script = None
if isinstance(_script, str) and re.match('^[0-9a-fA-F]*$', _script):
# convert from hex to bin, safely
script = binascii.unhexlify(_script)
else:
script = _script[:]
# pointer to byte offset in _script (as an array due to Python scoping rules)
ptr = [0]
witness_stack_len = read_var_int(ptr, script)
witness_stack = []
for _ in xrange(0, witness_stack_len):
stack_item = read_var_string(ptr, script)
witness_stack.append(stack_item)
return witness_stack | Given a hex-encoded serialized witness script, turn it into a witness stack
(i.e. an array of Nones, ints, and strings) | Below is the instruction that describes the task:
### Input:
Given a hex-encoded serialized witness script, turn it into a witness stack
(i.e. an array of Nones, ints, and strings)
### Response:
def btc_witness_script_deserialize(_script):
"""
Given a hex-encoded serialized witness script, turn it into a witness stack
(i.e. an array of Nones, ints, and strings)
"""
script = None
if isinstance(_script, str) and re.match('^[0-9a-fA-F]*$', _script):
# convert from hex to bin, safely
script = binascii.unhexlify(_script)
else:
script = _script[:]
# pointer to byte offset in _script (as an array due to Python scoping rules)
ptr = [0]
witness_stack_len = read_var_int(ptr, script)
witness_stack = []
for _ in xrange(0, witness_stack_len):
stack_item = read_var_string(ptr, script)
witness_stack.append(stack_item)
return witness_stack |
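The wire format being parsed is a varint item count followed by varint-length-prefixed items. A self-contained toy parser, restricted to single-byte varints (the real read_var_int / read_var_string handle the full Bitcoin varint encoding), makes the layout visible:

import binascii

def demo_witness_deserialize(hex_script):
    raw = binascii.unhexlify(hex_script)
    ptr = 0
    count, ptr = raw[ptr], ptr + 1          # number of stack items
    stack = []
    for _ in range(count):
        length, ptr = raw[ptr], ptr + 1     # length of the next item
        stack.append(raw[ptr:ptr + length])
        ptr += length
    return stack

print(demo_witness_deserialize('0201aa02bbcc'))   # [b'\xaa', b'\xbb\xcc']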
def mouse_move(self, event):
"""
The following gets back coordinates of the mouse on the canvas.
"""
if (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
self.posX = event.xdata
self.posY = event.ydata
self.graphic_target(self.posX, self.posY) | The following gets back coordinates of the mouse on the canvas. | Below is the instruction that describes the task:
### Input:
The following gets back coordinates of the mouse on the canvas.
### Response:
def mouse_move(self, event):
"""
The following gets back coordinates of the mouse on the canvas.
"""
if (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
self.posX = event.xdata
self.posY = event.ydata
self.graphic_target(self.posX, self.posY) |
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
"""Parse a query response protobuf to a document snapshot.
Args:
response_pb (google.cloud.proto.firestore.v1beta1.\
firestore_pb2.RunQueryResponse): A
collection (~.firestore_v1beta1.collection.CollectionReference): A
reference to the collection that initiated the query.
expected_prefix (str): The expected prefix for fully-qualified
document names returned in the query results. This can be computed
directly from ``collection`` via :meth:`_parent_info`.
Returns:
Optional[~.firestore.document.DocumentSnapshot]: A
snapshot of the data returned in the query. If ``response_pb.document``
is not set, the snapshot will be :data:`None`.
"""
if not response_pb.HasField("document"):
return None
document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
reference = collection.document(document_id)
data = _helpers.decode_dict(response_pb.document.fields, collection._client)
snapshot = document.DocumentSnapshot(
reference,
data,
exists=True,
read_time=response_pb.read_time,
create_time=response_pb.document.create_time,
update_time=response_pb.document.update_time,
)
return snapshot | Parse a query response protobuf to a document snapshot.
Args:
response_pb (google.cloud.proto.firestore.v1beta1.\
firestore_pb2.RunQueryResponse): A
collection (~.firestore_v1beta1.collection.CollectionReference): A
reference to the collection that initiated the query.
expected_prefix (str): The expected prefix for fully-qualified
document names returned in the query results. This can be computed
directly from ``collection`` via :meth:`_parent_info`.
Returns:
Optional[~.firestore.document.DocumentSnapshot]: A
snapshot of the data returned in the query. If ``response_pb.document``
is not set, the snapshot will be :data:`None`. | Below is the instruction that describes the task:
### Input:
Parse a query response protobuf to a document snapshot.
Args:
response_pb (google.cloud.proto.firestore.v1beta1.\
firestore_pb2.RunQueryResponse): A
collection (~.firestore_v1beta1.collection.CollectionReference): A
reference to the collection that initiated the query.
expected_prefix (str): The expected prefix for fully-qualified
document names returned in the query results. This can be computed
directly from ``collection`` via :meth:`_parent_info`.
Returns:
Optional[~.firestore.document.DocumentSnapshot]: A
snapshot of the data returned in the query. If ``response_pb.document``
is not set, the snapshot will be :data:`None`.
### Response:
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
"""Parse a query response protobuf to a document snapshot.
Args:
response_pb (google.cloud.proto.firestore.v1beta1.\
firestore_pb2.RunQueryResponse): A
collection (~.firestore_v1beta1.collection.CollectionReference): A
reference to the collection that initiated the query.
expected_prefix (str): The expected prefix for fully-qualified
document names returned in the query results. This can be computed
directly from ``collection`` via :meth:`_parent_info`.
Returns:
Optional[~.firestore.document.DocumentSnapshot]: A
snapshot of the data returned in the query. If ``response_pb.document``
is not set, the snapshot will be :data:`None`.
"""
if not response_pb.HasField("document"):
return None
document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
reference = collection.document(document_id)
data = _helpers.decode_dict(response_pb.document.fields, collection._client)
snapshot = document.DocumentSnapshot(
reference,
data,
exists=True,
read_time=response_pb.read_time,
create_time=response_pb.document.create_time,
update_time=response_pb.document.update_time,
)
return snapshot |
def __callback (self, odom):
'''
Callback function to receive and save Pose3d.
@param odom: ROS Odometry received
@type odom: Odometry
'''
pose = odometry2Pose3D(odom)
self.lock.acquire()
self.data = pose
self.lock.release() | Callback function to receive and save Pose3d.
@param odom: ROS Odometry received
@type odom: Odometry | Below is the instruction that describes the task:
### Input:
Callback function to receive and save Pose3d.
@param odom: ROS Odometry received
@type odom: Odometry
### Response:
def __callback (self, odom):
'''
Callback function to receive and save Pose3d.
@param odom: ROS Odometry received
@type odom: Odometry
'''
pose = odometry2Pose3D(odom)
self.lock.acquire()
self.data = pose
self.lock.release() |
def set_nested(data, value, *keys):
"""Assign to a nested dictionary.
:param dict data: Dictionary to mutate
:param value: Value to set
:param list *keys: List of nested keys
>>> data = {}
>>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
>>> data
{'k0': {'k1': {'k2': 'hi'}}}
"""
if len(keys) == 1:
data[keys[0]] = value
else:
if keys[0] not in data:
data[keys[0]] = {}
set_nested(data[keys[0]], value, *keys[1:]) | Assign to a nested dictionary.
:param dict data: Dictionary to mutate
:param value: Value to set
:param list *keys: List of nested keys
>>> data = {}
>>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
>>> data
{'k0': {'k1': {'k2': 'hi'}}} | Below is the instruction that describes the task:
### Input:
Assign to a nested dictionary.
:param dict data: Dictionary to mutate
:param value: Value to set
:param list *keys: List of nested keys
>>> data = {}
>>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
>>> data
{'k0': {'k1': {'k2': 'hi'}}}
### Response:
def set_nested(data, value, *keys):
"""Assign to a nested dictionary.
:param dict data: Dictionary to mutate
:param value: Value to set
:param list *keys: List of nested keys
>>> data = {}
>>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
>>> data
{'k0': {'k1': {'k2': 'hi'}}}
"""
if len(keys) == 1:
data[keys[0]] = value
else:
if keys[0] not in data:
data[keys[0]] = {}
set_nested(data[keys[0]], value, *keys[1:]) |
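The docstring's doctest shows a single insert; the function also merges into branches that already exist, which is worth seeing once:

data = {}
set_nested(data, 1, 'a', 'b')
set_nested(data, 2, 'a', 'c')   # reuses the existing 'a' branch
print(data)                     # {'a': {'b': 1, 'c': 2}}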
def get_raw_file():
"""
Get the raw divider file in a string array.
:return: the array
:rtype: str
"""
with open("{0}/dividers.txt".format(
os.path.abspath(
os.path.dirname(__file__)
)
), mode="r") as file_handler:
lines = file_handler.readlines()
lines[35] = str(
random.randint(0, 999999999999)
)
return lines | Get the raw divider file in a string array.
:return: the array
:rtype: str | Below is the instruction that describes the task:
### Input:
Get the raw divider file in a string array.
:return: the array
:rtype: str
### Response:
def get_raw_file():
"""
Get the raw divider file in a string array.
:return: the array
:rtype: str
"""
with open("{0}/dividers.txt".format(
os.path.abspath(
os.path.dirname(__file__)
)
), mode="r") as file_handler:
lines = file_handler.readlines()
lines[35] = str(
random.randint(0, 999999999999)
)
return lines |
def DemangleName(self, name):
"Applies some simple heuristics to split names into (city, name)."
# Handle special cases where our heuristcs doesn't work.
# Example:"Triemli" --> ("Triemli", "Zurich").
if name in SPECIAL_NAMES:
return SPECIAL_NAMES[name]
# Expand abbreviations.
for abbrev, expanded in [('str.', 'strasse'),
('Schiffst.', 'Schiffstation')]:
suffix_pos = name.rfind(abbrev)
if suffix_pos > 0:
name = name[:suffix_pos] + expanded
#end for
names = name.split(", ", 1)
if len(names) == 2:
if names[1] in POI_TERMS:
nam = u'%s %s' % (names[0], names[1])
else:
nam = names[1]
city = names[0]
else:
# "Zurich Enge": First word of station name designates the city
nam = names[0]
city = nam.split(' ')[0]
return (nam, SPECIAL_CITIES.get(city, city)) | Applies some simple heuristics to split names into (city, name). | Below is the instruction that describes the task:
### Input:
Applies some simple heuristics to split names into (city, name).
### Response:
def DemangleName(self, name):
"Applies some simple heuristics to split names into (city, name)."
# Handle special cases where our heuristcs doesn't work.
# Example:"Triemli" --> ("Triemli", "Zurich").
if name in SPECIAL_NAMES:
return SPECIAL_NAMES[name]
# Expand abbreviations.
for abbrev, expanded in [('str.', 'strasse'),
('Schiffst.', 'Schiffstation')]:
suffix_pos = name.rfind(abbrev)
if suffix_pos > 0:
name = name[:suffix_pos] + expanded
#end for
names = name.split(", ", 1)
if len(names) == 2:
if names[1] in POI_TERMS:
nam = u'%s %s' % (names[0], names[1])
else:
nam = names[1]
city = names[0]
else:
# "Zurich Enge": First word of station name designates the city
nam = names[0]
city = nam.split(' ')[0]
return (nam, SPECIAL_CITIES.get(city, city)) |
def normalise_rows(matrix):
""" Scales all rows to length 1. Fails when row is 0-length, so it
leaves these unchanged """
lengths = np.apply_along_axis(np.linalg.norm, 1, matrix)
if not (lengths > 0).all():
# raise ValueError('Cannot normalise 0 length vector to length 1')
# print(matrix)
lengths[lengths == 0] = 1
return matrix / lengths[:, np.newaxis] | Scales all rows to length 1. Fails when row is 0-length, so it
leaves these unchanged | Below is the the instruction that describes the task:
### Input:
Scales all rows to length 1. Fails when row is 0-length, so it
leaves these unchanged
### Response:
def normalise_rows(matrix):
""" Scales all rows to length 1. Fails when row is 0-length, so it
leaves these unchanged """
lengths = np.apply_along_axis(np.linalg.norm, 1, matrix)
if not (lengths > 0).all():
# raise ValueError('Cannot normalise 0 length vector to length 1')
# print(matrix)
lengths[lengths == 0] = 1
return matrix / lengths[:, np.newaxis] |
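A small self-contained check of the behaviour described above, including the zero-length row that is deliberately left unchanged:
import numpy as np

m = np.array([[3.0, 4.0],
              [0.0, 0.0]])
print(normalise_rows(m))
# [[0.6 0.8]   <- scaled to unit length
#  [0.  0. ]]  <- zero row kept as-is because its length is replaced by 1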
def start(self, timeout=None, task=None, block_callbacks=False, role=None):
"""Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed"""
# make sure we close all connections established against
# the former node, otherwise, we might get a stalled one
# after kill -9, which would report incorrect data to
# patroni.
self.close_connection()
if self.is_running():
logger.error('Cannot start PostgreSQL because one is already running.')
self.set_state('starting')
return True
if not block_callbacks:
self.__cb_pending = ACTION_ON_START
self.set_role(role or self.get_postgres_role_from_data_directory())
self.set_state('starting')
self._pending_restart = False
configuration = self._server_parameters if self.role == 'master' else self._build_effective_configuration()
self._write_postgresql_conf(configuration)
self.resolve_connection_addresses()
self._replace_pg_hba()
self._replace_pg_ident()
options = ['--{0}={1}'.format(p, configuration[p]) for p in self.CMDLINE_OPTIONS
if p in configuration and p != 'wal_keep_segments']
with self._cancellable_lock:
if self._is_cancelled:
return False
with task or null_context():
if task and task.is_cancelled:
logger.info("PostgreSQL start cancelled.")
return False
self._postmaster_proc = PostmasterProcess.start(self._pgcommand('postgres'),
self._data_dir,
self._postgresql_conf,
options)
if task:
task.complete(self._postmaster_proc)
start_timeout = timeout
if not start_timeout:
try:
start_timeout = float(self.config.get('pg_ctl_timeout', 60))
except ValueError:
start_timeout = 60
# We want postmaster to open ports before we continue
if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout):
return False
ret = self.wait_for_startup(start_timeout)
if ret is not None:
return ret
elif timeout is not None:
return False
else:
return None | Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed | Below is the the instruction that describes the task:
### Input:
Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed
### Response:
def start(self, timeout=None, task=None, block_callbacks=False, role=None):
"""Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed"""
# make sure we close all connections established against
# the former node, otherwise, we might get a stalled one
# after kill -9, which would report incorrect data to
# patroni.
self.close_connection()
if self.is_running():
logger.error('Cannot start PostgreSQL because one is already running.')
self.set_state('starting')
return True
if not block_callbacks:
self.__cb_pending = ACTION_ON_START
self.set_role(role or self.get_postgres_role_from_data_directory())
self.set_state('starting')
self._pending_restart = False
configuration = self._server_parameters if self.role == 'master' else self._build_effective_configuration()
self._write_postgresql_conf(configuration)
self.resolve_connection_addresses()
self._replace_pg_hba()
self._replace_pg_ident()
options = ['--{0}={1}'.format(p, configuration[p]) for p in self.CMDLINE_OPTIONS
if p in configuration and p != 'wal_keep_segments']
with self._cancellable_lock:
if self._is_cancelled:
return False
with task or null_context():
if task and task.is_cancelled:
logger.info("PostgreSQL start cancelled.")
return False
self._postmaster_proc = PostmasterProcess.start(self._pgcommand('postgres'),
self._data_dir,
self._postgresql_conf,
options)
if task:
task.complete(self._postmaster_proc)
start_timeout = timeout
if not start_timeout:
try:
start_timeout = float(self.config.get('pg_ctl_timeout', 60))
except ValueError:
start_timeout = 60
# We want postmaster to open ports before we continue
if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout):
return False
ret = self.wait_for_startup(start_timeout)
if ret is not None:
return ret
elif timeout is not None:
return False
else:
return None |
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string | Removes the TENANT_TOKEN from a particular string | Below is the the instruction that describes the task:
### Input:
Removes the TENANT_TOKEN from a particular string
### Response:
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string |
def get_last_update(op):
"""Return the most recent timestamp in the operation."""
last_update = get_end_time(op)
if not last_update:
last_event = get_last_event(op)
if last_event:
last_update = last_event['timestamp']
if not last_update:
last_update = get_create_time(op)
return last_update | Return the most recent timestamp in the operation. | Below is the the instruction that describes the task:
### Input:
Return the most recent timestamp in the operation.
### Response:
def get_last_update(op):
"""Return the most recent timestamp in the operation."""
last_update = get_end_time(op)
if not last_update:
last_event = get_last_event(op)
if last_event:
last_update = last_event['timestamp']
if not last_update:
last_update = get_create_time(op)
return last_update |
def read_in_weight_table(self, in_weight_table):
"""
Read in weight table
"""
print("Reading the weight table...")
with open_csv(in_weight_table, "r") as csvfile:
reader = csv.reader(csvfile)
header_row = next(reader)
# check number of columns in the weight table
if len(header_row) < len(self.header_wt):
raise Exception(self.error_messages[4])
# check header
if header_row[1:len(self.header_wt)] != self.header_wt[1:]:
raise Exception(self.error_messages[5])
self.dict_list = \
np.loadtxt(
in_weight_table,
delimiter=",",
usecols=(0, 1, 2, 3, 4),
skiprows=1,
dtype={
'names': (self.header_wt[0],
self.header_wt[1],
self.header_wt[2],
self.header_wt[3],
self.header_wt[4]),
'formats': ('i8', 'f8', 'i8', 'i8', 'i8')
},
)
self.count = self.dict_list.shape[0]
self.size_stream_id = \
len(np.unique(np.array(self.dict_list[self.header_wt[0]],
dtype=np.int32))) | Read in weight table | Below is the the instruction that describes the task:
### Input:
Read in weight table
### Response:
def read_in_weight_table(self, in_weight_table):
"""
Read in weight table
"""
print("Reading the weight table...")
with open_csv(in_weight_table, "r") as csvfile:
reader = csv.reader(csvfile)
header_row = next(reader)
# check number of columns in the weight table
if len(header_row) < len(self.header_wt):
raise Exception(self.error_messages[4])
# check header
if header_row[1:len(self.header_wt)] != self.header_wt[1:]:
raise Exception(self.error_messages[5])
self.dict_list = \
np.loadtxt(
in_weight_table,
delimiter=",",
usecols=(0, 1, 2, 3, 4),
skiprows=1,
dtype={
'names': (self.header_wt[0],
self.header_wt[1],
self.header_wt[2],
self.header_wt[3],
self.header_wt[4]),
'formats': ('i8', 'f8', 'i8', 'i8', 'i8')
},
)
self.count = self.dict_list.shape[0]
self.size_stream_id = \
len(np.unique(np.array(self.dict_list[self.header_wt[0]],
dtype=np.int32))) |
def to_cartesian(r, theta, theta_units="radians"):
"""
Converts polar r, theta to cartesian x, y.
"""
assert theta_units in ['radians', 'degrees'],\
"kwarg theta_units must specified in radians or degrees"
# Convert to radians
if theta_units == "degrees":
theta = to_radians(theta)
theta = to_proper_radians(theta)
x = r * cos(theta)
y = r * sin(theta)
return x, y | Converts polar r, theta to cartesian x, y. | Below is the the instruction that describes the task:
### Input:
Converts polar r, theta to cartesian x, y.
### Response:
def to_cartesian(r, theta, theta_units="radians"):
"""
Converts polar r, theta to cartesian x, y.
"""
assert theta_units in ['radians', 'degrees'],\
"kwarg theta_units must specified in radians or degrees"
# Convert to radians
if theta_units == "degrees":
theta = to_radians(theta)
theta = to_proper_radians(theta)
x = r * cos(theta)
y = r * sin(theta)
return x, y |
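A short usage sketch; to_radians and to_proper_radians are helpers from the same module, so only the public call is exercised here:
from math import isclose

x, y = to_cartesian(2.0, 180, theta_units="degrees")
# r=2 at 180 degrees lands on the negative x-axis; y is zero up to float error
assert isclose(x, -2.0) and isclose(y, 0.0, abs_tol=1e-9)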
def SetStorageProfiler(self, storage_profiler):
"""Sets the storage profiler.
Args:
storage_profiler (StorageProfiler): storage profiler.
"""
self._storage_profiler = storage_profiler
if self._storage_file:
self._storage_file.SetStorageProfiler(storage_profiler) | Sets the storage profiler.
Args:
storage_profiler (StorageProfiler): storage profiler. | Below is the the instruction that describes the task:
### Input:
Sets the storage profiler.
Args:
storage_profiler (StorageProfiler): storage profiler.
### Response:
def SetStorageProfiler(self, storage_profiler):
"""Sets the storage profiler.
Args:
storage_profiler (StorageProfiler): storage profiler.
"""
self._storage_profiler = storage_profiler
if self._storage_file:
self._storage_file.SetStorageProfiler(storage_profiler) |
async def unpinChatMessage(self, chat_id):
""" See: https://core.telegram.org/bots/api#unpinchatmessage """
p = _strip(locals())
return await self._api_request('unpinChatMessage', _rectify(p)) | See: https://core.telegram.org/bots/api#unpinchatmessage | Below is the the instruction that describes the task:
### Input:
See: https://core.telegram.org/bots/api#unpinchatmessage
### Response:
async def unpinChatMessage(self, chat_id):
""" See: https://core.telegram.org/bots/api#unpinchatmessage """
p = _strip(locals())
return await self._api_request('unpinChatMessage', _rectify(p)) |
def bookmark_rename(bookmark_id_or_name, new_bookmark_name):
"""
Executor for `globus bookmark rename`
"""
client = get_client()
bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"]
submit_data = {"name": new_bookmark_name}
res = client.update_bookmark(bookmark_id, submit_data)
formatted_print(res, simple_text="Success") | Executor for `globus bookmark rename` | Below is the the instruction that describes the task:
### Input:
Executor for `globus bookmark rename`
### Response:
def bookmark_rename(bookmark_id_or_name, new_bookmark_name):
"""
Executor for `globus bookmark rename`
"""
client = get_client()
bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"]
submit_data = {"name": new_bookmark_name}
res = client.update_bookmark(bookmark_id, submit_data)
formatted_print(res, simple_text="Success") |
def peer(opt_peer, opt_username, opt_password, scope="module"):
'''peer bigip fixture'''
p = BigIP(opt_peer, opt_username, opt_password)
return p | peer bigip fixture | Below is the the instruction that describes the task:
### Input:
peer bigip fixture
### Response:
def peer(opt_peer, opt_username, opt_password, scope="module"):
'''peer bigip fixture'''
p = BigIP(opt_peer, opt_username, opt_password)
return p |
def convert_enamldef_def_to_func(token_stream):
""" A token stream processor which processes all enaml declarative functions
to allow using `def` instead of `func`. It does this by transforming DEF
tokens to NAME within enamldef blocks and then changing the token value to
`func`.
Notes
------
Use this at your own risk! This was a feature intentionally
dismissed by the author of enaml because declarative func's are not the
same as python functions.
"""
in_enamldef = False
depth = 0
for tok in token_stream:
if tok.type == 'ENAMLDEF':
in_enamldef = True
elif tok.type == 'INDENT':
depth += 1
elif in_enamldef and tok.type == 'DEF':
# Since functions are not allowed on the RHS we can
# transform the token type to a NAME so it's picked up by the
# parser as a decl_funcdef instead of funcdef
tok.type = 'NAME'
tok.value = 'func'
elif tok.type == 'DEDENT':
depth -= 1
if depth == 0:
in_enamldef = False
yield tok | A token stream processor which processes all enaml declarative functions
to allow using `def` instead of `func`. It does this by transforming DEF
tokens to NAME within enamldef blocks and then changing the token value to
`func`.
Notes
------
Use this at your own risk! This was a feature intentionally
dismissed by the author of enaml because declarative func's are not the
same as python functions. | Below is the the instruction that describes the task:
### Input:
A token stream processor which processes all enaml declarative functions
to allow using `def` instead of `func`. It does this by transforming DEF
tokens to NAME within enamldef blocks and then changing the token value to
`func`.
Notes
------
Use this at your own risk! This was a feature intentionally
dismissed by the author of enaml because declarative func's are not the
same as python functions.
### Response:
def convert_enamldef_def_to_func(token_stream):
""" A token stream processor which processes all enaml declarative functions
to allow using `def` instead of `func`. It does this by transforming DEF
tokens to NAME within enamldef blocks and then changing the token value to
`func`.
Notes
------
Use this at your own risk! This was a feature intentionally
dismissed by the author of enaml because declarative func's are not the
same as python functions.
"""
in_enamldef = False
depth = 0
for tok in token_stream:
if tok.type == 'ENAMLDEF':
in_enamldef = True
elif tok.type == 'INDENT':
depth += 1
elif in_enamldef and tok.type == 'DEF':
# Since functions are not allowed on the RHS we can
# transform the token type to a NAME so it's picked up by the
# parser as a decl_funcdef instead of funcdef
tok.type = 'NAME'
tok.value = 'func'
elif tok.type == 'DEDENT':
depth -= 1
if depth == 0:
in_enamldef = False
yield tok |
def create_versions(self, project_id, versions):
""" Accepts result of getVersions()
"""
for v in versions:
self.create_version(project_id, v) | Accepts result of getVersions() | Below is the the instruction that describes the task:
### Input:
Accepts result of getVersions()
### Response:
def create_versions(self, project_id, versions):
""" Accepts result of getVersions()
"""
for v in versions:
self.create_version(project_id, v) |
def get_cpu_info(self):
"""
Retrieves CPU info from client
"""
info = snap7.snap7types.S7CpuInfo()
result = self.library.Cli_GetCpuInfo(self.pointer, byref(info))
check_error(result, context="client")
return info | Retrieves CPU info from client | Below is the the instruction that describes the task:
### Input:
Retrieves CPU info from client
### Response:
def get_cpu_info(self):
"""
Retrieves CPU info from client
"""
info = snap7.snap7types.S7CpuInfo()
result = self.library.Cli_GetCpuInfo(self.pointer, byref(info))
check_error(result, context="client")
return info |
def write_updates_to_csv(self, updates):
"""
Given a list of updates, write the updates out to the provided CSV
file.
Args:
updates (list): List of Update objects.
"""
with open(self._csv_file_name, 'w') as csvfile:
csvwriter = self.csv_writer(csvfile)
csvwriter.writerow(CSV_COLUMN_HEADERS)
for update in updates:
row = [
update.name,
update.current_version,
update.new_version,
update.prelease,
]
csvwriter.writerow(row) | Given a list of updates, write the updates out to the provided CSV
file.
Args:
updates (list): List of Update objects. | Below is the the instruction that describes the task:
### Input:
Given a list of updates, write the updates out to the provided CSV
file.
Args:
updates (list): List of Update objects.
### Response:
def write_updates_to_csv(self, updates):
"""
Given a list of updates, write the updates out to the provided CSV
file.
Args:
updates (list): List of Update objects.
"""
with open(self._csv_file_name, 'w') as csvfile:
csvwriter = self.csv_writer(csvfile)
csvwriter.writerow(CSV_COLUMN_HEADERS)
for update in updates:
row = [
update.name,
update.current_version,
update.new_version,
update.prelease,
]
csvwriter.writerow(row) |
def adapt(self, d, x):
"""
        Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
# update
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
        self.w += self.mu * dw | Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array) | Below is the the instruction that describes the task:
### Input:
Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
### Response:
def adapt(self, d, x):
"""
        Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
# update
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw |
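Written out, the update implemented above amounts to a regularised least-squares correction over the buffered samples, where X stands for x_mem, d for d_mem, mu for self.mu and eps*I for the diagonal loading term ide_eps:
w \leftarrow w + \mu \, X \left( X^\top X + \varepsilon I \right)^{-1} \left( d - X^\top w \right)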
def is_python_interpreter(filename):
"""Evaluate wether a file is a python interpreter or not."""
real_filename = os.path.realpath(filename) # To follow symlink if existent
if (not osp.isfile(real_filename) or
not is_python_interpreter_valid_name(filename)):
return False
elif is_pythonw(filename):
if os.name == 'nt':
# pythonw is a binary on Windows
if not encoding.is_text_file(real_filename):
return True
else:
return False
elif sys.platform == 'darwin':
# pythonw is a text file in Anaconda but a binary in
# the system
if is_anaconda() and encoding.is_text_file(real_filename):
return True
elif not encoding.is_text_file(real_filename):
return True
else:
return False
else:
# There's no pythonw in other systems
return False
elif encoding.is_text_file(real_filename):
# At this point we can't have a text file
return False
else:
        return check_python_help(filename) | Evaluate whether a file is a python interpreter or not. | Below is the the instruction that describes the task:
### Input:
Evaluate whether a file is a python interpreter or not.
### Response:
def is_python_interpreter(filename):
"""Evaluate wether a file is a python interpreter or not."""
real_filename = os.path.realpath(filename) # To follow symlink if existent
if (not osp.isfile(real_filename) or
not is_python_interpreter_valid_name(filename)):
return False
elif is_pythonw(filename):
if os.name == 'nt':
# pythonw is a binary on Windows
if not encoding.is_text_file(real_filename):
return True
else:
return False
elif sys.platform == 'darwin':
# pythonw is a text file in Anaconda but a binary in
# the system
if is_anaconda() and encoding.is_text_file(real_filename):
return True
elif not encoding.is_text_file(real_filename):
return True
else:
return False
else:
# There's no pythonw in other systems
return False
elif encoding.is_text_file(real_filename):
# At this point we can't have a text file
return False
else:
return check_python_help(filename) |
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states) | Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules. | Below is the the instruction that describes the task:
### Input:
Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
### Response:
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states) |
def mv_normal_like(x, mu, tau):
R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x])
else:
return flib.prec_mvnorm(x, mu, tau) | R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like` | Below is the the instruction that describes the task:
### Input:
R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
### Response:
def mv_normal_like(x, mu, tau):
R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x])
else:
return flib.prec_mvnorm(x, mu, tau) |
def process_buffer(self, i_block, receive_buffer):
"""Blocking function to process the received heaps.
This is run in an executor.
"""
self._log.info("Worker thread processing block %i", i_block)
time_overall0 = time.time()
time_unpack = 0.0
time_write = 0.0
for i_heap, heap in enumerate(receive_buffer.result()):
# Skip and log any incomplete heaps.
if isinstance(heap, spead2.recv.IncompleteHeap):
self._log.info("Dropped incomplete heap %i", heap.cnt + 1)
continue
# Update the item group from this heap.
items = self._item_group.update(heap)
# Get the time and channel indices from the heap index.
i_chan = i_heap // self._num_buffer_times
i_time = i_heap % self._num_buffer_times
if 'correlator_output_data' in items:
vis_data = items['correlator_output_data'].value['VIS']
if self._block is None:
num_baselines = vis_data.shape[0]
num_pols = vis_data[0].shape[0]
self._block = numpy.zeros((self._num_buffer_times,
self._num_streams,
num_baselines),
dtype=('c8', num_pols))
self._block[:, :, :] = 0 # To make the copies faster.
# Unpack data from the heap into the block to be processed.
time_unpack0 = time.time()
self._block[i_time, i_chan, :] = vis_data
time_unpack += time.time() - time_unpack0
# Check the data for debugging!
val = self._block[i_time, i_chan, -1][-1].real
self._log.debug("Data: %.3f", val)
if self._block is not None:
# Process the buffered data here.
if self._config['process_data']:
pass
# Write the buffered data to storage.
if self._config['write_data']:
time_write0 = time.time()
with open(self._config['filename'], 'ab') as f:
# Don't use pickle, it's really slow (even protocol 4)!
numpy.save(f, self._block, allow_pickle=False)
time_write += time.time() - time_write0
# Report time taken.
time_overall = time.time() - time_overall0
self._log.info("Total processing time: %.1f ms", 1000 * time_overall)
self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall)
self._log.info("Write was %.1f %%", 100 * time_write / time_overall)
if time_unpack != 0.0:
self._log.info("Memory speed %.1f MB/s",
(self._block.nbytes * 1e-6) / time_unpack)
if time_write != 0.0:
self._log.info("Write speed %.1f MB/s",
(self._block.nbytes * 1e-6) / time_write) | Blocking function to process the received heaps.
This is run in an executor. | Below is the the instruction that describes the task:
### Input:
Blocking function to process the received heaps.
This is run in an executor.
### Response:
def process_buffer(self, i_block, receive_buffer):
"""Blocking function to process the received heaps.
This is run in an executor.
"""
self._log.info("Worker thread processing block %i", i_block)
time_overall0 = time.time()
time_unpack = 0.0
time_write = 0.0
for i_heap, heap in enumerate(receive_buffer.result()):
# Skip and log any incomplete heaps.
if isinstance(heap, spead2.recv.IncompleteHeap):
self._log.info("Dropped incomplete heap %i", heap.cnt + 1)
continue
# Update the item group from this heap.
items = self._item_group.update(heap)
# Get the time and channel indices from the heap index.
i_chan = i_heap // self._num_buffer_times
i_time = i_heap % self._num_buffer_times
if 'correlator_output_data' in items:
vis_data = items['correlator_output_data'].value['VIS']
if self._block is None:
num_baselines = vis_data.shape[0]
num_pols = vis_data[0].shape[0]
self._block = numpy.zeros((self._num_buffer_times,
self._num_streams,
num_baselines),
dtype=('c8', num_pols))
self._block[:, :, :] = 0 # To make the copies faster.
# Unpack data from the heap into the block to be processed.
time_unpack0 = time.time()
self._block[i_time, i_chan, :] = vis_data
time_unpack += time.time() - time_unpack0
# Check the data for debugging!
val = self._block[i_time, i_chan, -1][-1].real
self._log.debug("Data: %.3f", val)
if self._block is not None:
# Process the buffered data here.
if self._config['process_data']:
pass
# Write the buffered data to storage.
if self._config['write_data']:
time_write0 = time.time()
with open(self._config['filename'], 'ab') as f:
# Don't use pickle, it's really slow (even protocol 4)!
numpy.save(f, self._block, allow_pickle=False)
time_write += time.time() - time_write0
# Report time taken.
time_overall = time.time() - time_overall0
self._log.info("Total processing time: %.1f ms", 1000 * time_overall)
self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall)
self._log.info("Write was %.1f %%", 100 * time_write / time_overall)
if time_unpack != 0.0:
self._log.info("Memory speed %.1f MB/s",
(self._block.nbytes * 1e-6) / time_unpack)
if time_write != 0.0:
self._log.info("Write speed %.1f MB/s",
(self._block.nbytes * 1e-6) / time_write) |
def date_from_relative_week_year(base_date, time, dow, ordinal=1):
"""
Converts relative day to time
Eg. this tuesday, last tuesday
"""
# If there is an ordinal (next 3 weeks) => return a start and end range
# Reset date to start of the day
relative_date = datetime(base_date.year, base_date.month, base_date.day)
ord = convert_string_to_number(ordinal)
if dow in year_variations:
if time == 'this' or time == 'coming':
return datetime(relative_date.year, 1, 1)
elif time == 'last' or time == 'previous':
return datetime(relative_date.year - 1, relative_date.month, 1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(ord * 365)
elif time == 'end of the':
return datetime(relative_date.year, 12, 31)
elif dow in month_variations:
if time == 'this':
return datetime(relative_date.year, relative_date.month, relative_date.day)
elif time == 'last' or time == 'previous':
return datetime(relative_date.year, relative_date.month - 1, relative_date.day)
elif time == 'next' or time == 'following':
if relative_date.month + ord >= 12:
month = relative_date.month - 1 + ord
year = relative_date.year + month // 12
month = month % 12 + 1
day = min(relative_date.day, calendar.monthrange(year, month)[1])
return datetime(year, month, day)
else:
return datetime(relative_date.year, relative_date.month + ord, relative_date.day)
elif time == 'end of the':
return datetime(
relative_date.year,
relative_date.month,
calendar.monthrange(relative_date.year, relative_date.month)[1]
)
elif dow in week_variations:
if time == 'this':
return relative_date - timedelta(days=relative_date.weekday())
elif time == 'last' or time == 'previous':
return relative_date - timedelta(weeks=1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(weeks=ord)
elif time == 'end of the':
            # "end of the week" resolves to the Sunday of the current week
            day_of_week = relative_date.weekday()
            return relative_date + timedelta(days=6 - day_of_week)
elif dow in day_variations:
if time == 'this':
return relative_date
elif time == 'last' or time == 'previous':
return relative_date - timedelta(days=1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(days=ord)
elif time == 'end of the':
return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59) | Converts relative day to time
Eg. this tuesday, last tuesday | Below is the the instruction that describes the task:
### Input:
Converts relative day to time
Eg. this tuesday, last tuesday
### Response:
def date_from_relative_week_year(base_date, time, dow, ordinal=1):
"""
Converts relative day to time
Eg. this tuesday, last tuesday
"""
# If there is an ordinal (next 3 weeks) => return a start and end range
# Reset date to start of the day
relative_date = datetime(base_date.year, base_date.month, base_date.day)
ord = convert_string_to_number(ordinal)
if dow in year_variations:
if time == 'this' or time == 'coming':
return datetime(relative_date.year, 1, 1)
elif time == 'last' or time == 'previous':
return datetime(relative_date.year - 1, relative_date.month, 1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(ord * 365)
elif time == 'end of the':
return datetime(relative_date.year, 12, 31)
elif dow in month_variations:
if time == 'this':
return datetime(relative_date.year, relative_date.month, relative_date.day)
elif time == 'last' or time == 'previous':
return datetime(relative_date.year, relative_date.month - 1, relative_date.day)
elif time == 'next' or time == 'following':
if relative_date.month + ord >= 12:
month = relative_date.month - 1 + ord
year = relative_date.year + month // 12
month = month % 12 + 1
day = min(relative_date.day, calendar.monthrange(year, month)[1])
return datetime(year, month, day)
else:
return datetime(relative_date.year, relative_date.month + ord, relative_date.day)
elif time == 'end of the':
return datetime(
relative_date.year,
relative_date.month,
calendar.monthrange(relative_date.year, relative_date.month)[1]
)
elif dow in week_variations:
if time == 'this':
return relative_date - timedelta(days=relative_date.weekday())
elif time == 'last' or time == 'previous':
return relative_date - timedelta(weeks=1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(weeks=ord)
elif time == 'end of the':
            # "end of the week" resolves to the Sunday of the current week
            day_of_week = relative_date.weekday()
            return relative_date + timedelta(days=6 - day_of_week)
elif dow in day_variations:
if time == 'this':
return relative_date
elif time == 'last' or time == 'previous':
return relative_date - timedelta(days=1)
elif time == 'next' or time == 'following':
return relative_date + timedelta(days=ord)
elif time == 'end of the':
return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59) |
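Illustrative calls, assuming the module-level word lists (week_variations, day_variations, ...) contain the plain words 'week' and 'day' and that convert_string_to_number copes with the default ordinal:
from datetime import datetime

base = datetime(2019, 6, 12, 15, 30)            # a Wednesday
date_from_relative_week_year(base, 'this', 'week')
# -> datetime(2019, 6, 10, 0, 0), the Monday of the current week
date_from_relative_week_year(base, 'end of the', 'day')
# -> datetime(2019, 6, 12, 23, 59, 59)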
def convert_dict_to_datetime(obj_map):
"""converts dictionary representations of datetime back to datetime obj"""
converted_map = {}
for key, value in obj_map.items():
if isinstance(value, dict) and 'tzinfo' in value.keys():
converted_map[key] = datetime.datetime(**value)
elif isinstance(value, dict):
converted_map[key] = convert_dict_to_datetime(value)
elif isinstance(value, list):
updated_list = []
for internal_item in value:
if isinstance(internal_item, dict):
updated_list.append(convert_dict_to_datetime(internal_item))
else:
updated_list.append(internal_item)
converted_map[key] = updated_list
else:
converted_map[key] = value
return converted_map | converts dictionary representations of datetime back to datetime obj | Below is the the instruction that describes the task:
### Input:
converts dictionary representations of datetime back to datetime obj
### Response:
def convert_dict_to_datetime(obj_map):
"""converts dictionary representations of datetime back to datetime obj"""
converted_map = {}
for key, value in obj_map.items():
if isinstance(value, dict) and 'tzinfo' in value.keys():
converted_map[key] = datetime.datetime(**value)
elif isinstance(value, dict):
converted_map[key] = convert_dict_to_datetime(value)
elif isinstance(value, list):
updated_list = []
for internal_item in value:
if isinstance(internal_item, dict):
updated_list.append(convert_dict_to_datetime(internal_item))
else:
updated_list.append(internal_item)
converted_map[key] = updated_list
else:
converted_map[key] = value
return converted_map |
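A round-trip sketch: any nested dict carrying a 'tzinfo' key is treated as datetime keyword arguments and rebuilt, and dicts found inside lists are processed recursively:
serialised = {
    'created': {'year': 2021, 'month': 3, 'day': 14, 'hour': 9, 'minute': 26,
                'second': 53, 'microsecond': 0, 'tzinfo': None},
    'events': [{'added': {'year': 2020, 'month': 1, 'day': 1, 'hour': 0, 'minute': 0,
                          'second': 0, 'microsecond': 0, 'tzinfo': None}}],
    'count': 2,
}
restored = convert_dict_to_datetime(serialised)
# restored['created'] == datetime.datetime(2021, 3, 14, 9, 26, 53)
# restored['events'][0]['added'] == datetime.datetime(2020, 1, 1, 0, 0)
# plain values such as 'count' pass through untouched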
def get_trust_id(self):
"""Gets the ``Trust`` ``Id`` for this authorization.
return: (osid.id.Id) - the trust ``Id``
raise: IllegalState - ``has_trust()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not bool(self._my_map['trustId']):
raise errors.IllegalState('this Authorization has no trust')
else:
return Id(self._my_map['trustId']) | Gets the ``Trust`` ``Id`` for this authorization.
return: (osid.id.Id) - the trust ``Id``
raise: IllegalState - ``has_trust()`` is ``false``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the ``Trust`` ``Id`` for this authorization.
return: (osid.id.Id) - the trust ``Id``
raise: IllegalState - ``has_trust()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_trust_id(self):
"""Gets the ``Trust`` ``Id`` for this authorization.
return: (osid.id.Id) - the trust ``Id``
raise: IllegalState - ``has_trust()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not bool(self._my_map['trustId']):
raise errors.IllegalState('this Authorization has no trust')
else:
return Id(self._my_map['trustId']) |
def setTransducer(self, edfsignal, transducer):
"""
Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['transducer'] = transducer
self.update_header() | Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action. | Below is the the instruction that describes the task:
### Input:
Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
### Response:
def setTransducer(self, edfsignal, transducer):
"""
Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['transducer'] = transducer
self.update_header() |
def _NormalizedVolumeIdentifiers(
self, volume_system, volume_identifiers, prefix='v'):
"""Normalizes volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
volume_identifiers (list[int|str]): allowed volume identifiers, formatted
as an integer or string with prefix.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix.
Raises:
ScannerError: if the volume identifier is not supported or no volume
could be found that corresponds with the identifier.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
if isinstance(volume_identifier, int):
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
elif not volume_identifier.startswith(prefix):
try:
volume_identifier = int(volume_identifier, 10)
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
except (TypeError, ValueError):
pass
try:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
except KeyError:
volume = None
if not volume:
raise errors.ScannerError(
'Volume missing for identifier: {0:s}.'.format(volume_identifier))
normalized_volume_identifiers.append(volume_identifier)
return normalized_volume_identifiers | Normalizes volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
volume_identifiers (list[int|str]): allowed volume identifiers, formatted
as an integer or string with prefix.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix.
Raises:
ScannerError: if the volume identifier is not supported or no volume
could be found that corresponds with the identifier. | Below is the the instruction that describes the task:
### Input:
Normalizes volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
volume_identifiers (list[int|str]): allowed volume identifiers, formatted
as an integer or string with prefix.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix.
Raises:
ScannerError: if the volume identifier is not supported or no volume
could be found that corresponds with the identifier.
### Response:
def _NormalizedVolumeIdentifiers(
self, volume_system, volume_identifiers, prefix='v'):
"""Normalizes volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
volume_identifiers (list[int|str]): allowed volume identifiers, formatted
as an integer or string with prefix.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix.
Raises:
ScannerError: if the volume identifier is not supported or no volume
could be found that corresponds with the identifier.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
if isinstance(volume_identifier, int):
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
elif not volume_identifier.startswith(prefix):
try:
volume_identifier = int(volume_identifier, 10)
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
except (TypeError, ValueError):
pass
try:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
except KeyError:
volume = None
if not volume:
raise errors.ScannerError(
'Volume missing for identifier: {0:s}.'.format(volume_identifier))
normalized_volume_identifiers.append(volume_identifier)
return normalized_volume_identifiers |
def items(self):
"""
Get a queryset, cache infos for standardized access to them later
then compute the maximum of entries to define the priority
        of each item.
"""
queryset = self.get_queryset()
self.cache_infos(queryset)
self.set_max_entries()
return queryset | Get a queryset, cache infos for standardized access to them later
then compute the maximum of entries to define the priority
        of each item. | Below is the the instruction that describes the task:
### Input:
Get a queryset, cache infos for standardized access to them later
then compute the maximum of entries to define the priority
        of each item.
### Response:
def items(self):
"""
Get a queryset, cache infos for standardized access to them later
then compute the maximum of entries to define the priority
        of each item.
"""
queryset = self.get_queryset()
self.cache_infos(queryset)
self.set_max_entries()
return queryset |
def docx_extraction(docx, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given a .docx file, returns the raw text associated with the given .docx file.
The .docx file may be provided as base64 encoded data or as a filepath.
Example usage:
.. code-block:: python
>>> from indicoio import docx_extraction
>>> results = docx_extraction(docx_file)
:param docx: The docx to be analyzed.
:type docx: str or list of strs
:rtype: dict or list of dicts
"""
docx = docx_preprocess(docx, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version}
results = api_handler(docx, cloud=cloud, api="docxextraction", url_params=url_params, **kwargs)
return results | Given a .docx file, returns the raw text associated with the given .docx file.
The .docx file may be provided as base64 encoded data or as a filepath.
Example usage:
.. code-block:: python
>>> from indicoio import docx_extraction
>>> results = docx_extraction(docx_file)
:param docx: The docx to be analyzed.
:type docx: str or list of strs
:rtype: dict or list of dicts | Below is the the instruction that describes the task:
### Input:
Given a .docx file, returns the raw text associated with the given .docx file.
The .docx file may be provided as base64 encoded data or as a filepath.
Example usage:
.. code-block:: python
>>> from indicoio import docx_extraction
>>> results = docx_extraction(docx_file)
:param docx: The docx to be analyzed.
:type docx: str or list of strs
:rtype: dict or list of dicts
### Response:
def docx_extraction(docx, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given a .docx file, returns the raw text associated with the given .docx file.
The .docx file may be provided as base64 encoded data or as a filepath.
Example usage:
.. code-block:: python
>>> from indicoio import docx_extraction
>>> results = docx_extraction(docx_file)
:param docx: The docx to be analyzed.
:type docx: str or list of strs
:rtype: dict or list of dicts
"""
docx = docx_preprocess(docx, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version}
results = api_handler(docx, cloud=cloud, api="docxextraction", url_params=url_params, **kwargs)
return results |
def from_xso_item(cls, xso_item):
"""
Create a :class:`Item` with the :attr:`jid` set to the
:attr:`.xso.Item.jid` obtained from `xso_item`. Then update that
instance with `xso_item` using :meth:`update_from_xso_item` and return
it.
"""
item = cls(xso_item.jid)
item.update_from_xso_item(xso_item)
return item | Create a :class:`Item` with the :attr:`jid` set to the
:attr:`.xso.Item.jid` obtained from `xso_item`. Then update that
instance with `xso_item` using :meth:`update_from_xso_item` and return
it. | Below is the the instruction that describes the task:
### Input:
Create a :class:`Item` with the :attr:`jid` set to the
:attr:`.xso.Item.jid` obtained from `xso_item`. Then update that
instance with `xso_item` using :meth:`update_from_xso_item` and return
it.
### Response:
def from_xso_item(cls, xso_item):
"""
Create a :class:`Item` with the :attr:`jid` set to the
:attr:`.xso.Item.jid` obtained from `xso_item`. Then update that
instance with `xso_item` using :meth:`update_from_xso_item` and return
it.
"""
item = cls(xso_item.jid)
item.update_from_xso_item(xso_item)
return item |
def put(self, entity):
"""Adds an entity to be committed.
Ensures the transaction is not marked readonly.
Please see documentation at
:meth:`~google.cloud.datastore.batch.Batch.put`
:type entity: :class:`~google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`RuntimeError` if the transaction
is marked ReadOnly
"""
if self._options.HasField("read_only"):
raise RuntimeError("Transaction is read only")
else:
super(Transaction, self).put(entity) | Adds an entity to be committed.
Ensures the transaction is not marked readonly.
Please see documentation at
:meth:`~google.cloud.datastore.batch.Batch.put`
:type entity: :class:`~google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`RuntimeError` if the transaction
is marked ReadOnly | Below is the the instruction that describes the task:
### Input:
Adds an entity to be committed.
Ensures the transaction is not marked readonly.
Please see documentation at
:meth:`~google.cloud.datastore.batch.Batch.put`
:type entity: :class:`~google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`RuntimeError` if the transaction
is marked ReadOnly
### Response:
def put(self, entity):
"""Adds an entity to be committed.
Ensures the transaction is not marked readonly.
Please see documentation at
:meth:`~google.cloud.datastore.batch.Batch.put`
:type entity: :class:`~google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`RuntimeError` if the transaction
is marked ReadOnly
"""
if self._options.HasField("read_only"):
raise RuntimeError("Transaction is read only")
else:
super(Transaction, self).put(entity) |
def disaggregate(self, sitecol, ruptures, iml4, truncnorm, epsilons,
monitor=Monitor()):
"""
Disaggregate (separate) PoE in different contributions.
:param sitecol: a SiteCollection with N sites
:param ruptures: an iterator over ruptures with the same TRT
:param iml4: a 4d array of IMLs of shape (N, R, M, P)
:param truncnorm: an instance of scipy.stats.truncnorm
:param epsilons: the epsilon bins
:param monitor: a Monitor instance
:returns:
an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats
"""
acc = AccumDict(accum=[])
ctx_mon = monitor('disagg_contexts', measuremem=False)
pne_mon = monitor('disaggregate_pne', measuremem=False)
clo_mon = monitor('get_closest', measuremem=False)
for rupture in ruptures:
with ctx_mon:
orig_dctx = DistancesContext(
(param, get_distances(rupture, sitecol, param))
for param in self.REQUIRES_DISTANCES)
self.add_rup_params(rupture)
with clo_mon: # this is faster than computing orig_dctx
closest_points = rupture.surface.get_closest_points(sitecol)
cache = {}
for rlz, gsim in self.gsim_by_rlzi.items():
dctx = orig_dctx.roundup(gsim.minimum_distance)
for m, imt in enumerate(iml4.imts):
for p, poe in enumerate(iml4.poes_disagg):
iml = tuple(iml4.array[:, rlz, m, p])
try:
pne = cache[gsim, imt, iml]
except KeyError:
with pne_mon:
pne = gsim.disaggregate_pne(
rupture, sitecol, dctx, imt, iml,
truncnorm, epsilons)
cache[gsim, imt, iml] = pne
acc[poe, str(imt), rlz].append(pne)
acc['mags'].append(rupture.mag)
acc['dists'].append(getattr(dctx, self.filter_distance))
acc['lons'].append(closest_points.lons)
acc['lats'].append(closest_points.lats)
return acc | Disaggregate (separate) PoE in different contributions.
:param sitecol: a SiteCollection with N sites
:param ruptures: an iterator over ruptures with the same TRT
:param iml4: a 4d array of IMLs of shape (N, R, M, P)
:param truncnorm: an instance of scipy.stats.truncnorm
:param epsilons: the epsilon bins
:param monitor: a Monitor instance
:returns:
an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats | Below is the the instruction that describes the task:
### Input:
Disaggregate (separate) PoE in different contributions.
:param sitecol: a SiteCollection with N sites
:param ruptures: an iterator over ruptures with the same TRT
:param iml4: a 4d array of IMLs of shape (N, R, M, P)
:param truncnorm: an instance of scipy.stats.truncnorm
:param epsilons: the epsilon bins
:param monitor: a Monitor instance
:returns:
an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats
### Response:
def disaggregate(self, sitecol, ruptures, iml4, truncnorm, epsilons,
monitor=Monitor()):
"""
Disaggregate (separate) PoE in different contributions.
:param sitecol: a SiteCollection with N sites
:param ruptures: an iterator over ruptures with the same TRT
:param iml4: a 4d array of IMLs of shape (N, R, M, P)
:param truncnorm: an instance of scipy.stats.truncnorm
:param epsilons: the epsilon bins
:param monitor: a Monitor instance
:returns:
an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats
"""
acc = AccumDict(accum=[])
ctx_mon = monitor('disagg_contexts', measuremem=False)
pne_mon = monitor('disaggregate_pne', measuremem=False)
clo_mon = monitor('get_closest', measuremem=False)
for rupture in ruptures:
with ctx_mon:
orig_dctx = DistancesContext(
(param, get_distances(rupture, sitecol, param))
for param in self.REQUIRES_DISTANCES)
self.add_rup_params(rupture)
with clo_mon: # this is faster than computing orig_dctx
closest_points = rupture.surface.get_closest_points(sitecol)
cache = {}
for rlz, gsim in self.gsim_by_rlzi.items():
dctx = orig_dctx.roundup(gsim.minimum_distance)
for m, imt in enumerate(iml4.imts):
for p, poe in enumerate(iml4.poes_disagg):
iml = tuple(iml4.array[:, rlz, m, p])
try:
pne = cache[gsim, imt, iml]
except KeyError:
with pne_mon:
pne = gsim.disaggregate_pne(
rupture, sitecol, dctx, imt, iml,
truncnorm, epsilons)
cache[gsim, imt, iml] = pne
acc[poe, str(imt), rlz].append(pne)
acc['mags'].append(rupture.mag)
acc['dists'].append(getattr(dctx, self.filter_distance))
acc['lons'].append(closest_points.lons)
acc['lats'].append(closest_points.lats)
return acc |
def format_time(x):
"""Formats date values
This function formats :class:`datetime.datetime` and
:class:`datetime.timedelta` objects (and the corresponding numpy objects)
using the :func:`xarray.core.formatting.format_timestamp` and the
:func:`xarray.core.formatting.format_timedelta` functions.
Parameters
----------
x: object
The value to format. If not a time object, the value is returned
Returns
-------
str or `x`
Either the formatted time object or the initial `x`"""
if isinstance(x, (datetime64, datetime)):
return format_timestamp(x)
elif isinstance(x, (timedelta64, timedelta)):
return format_timedelta(x)
elif isinstance(x, ndarray):
return list(x) if x.ndim else x[()]
return x | Formats date values
This function formats :class:`datetime.datetime` and
:class:`datetime.timedelta` objects (and the corresponding numpy objects)
using the :func:`xarray.core.formatting.format_timestamp` and the
:func:`xarray.core.formatting.format_timedelta` functions.
Parameters
----------
x: object
The value to format. If not a time object, the value is returned
Returns
-------
str or `x`
Either the formatted time object or the initial `x` | Below is the the instruction that describes the task:
### Input:
Formats date values
This function formats :class:`datetime.datetime` and
:class:`datetime.timedelta` objects (and the corresponding numpy objects)
using the :func:`xarray.core.formatting.format_timestamp` and the
:func:`xarray.core.formatting.format_timedelta` functions.
Parameters
----------
x: object
The value to format. If not a time object, the value is returned
Returns
-------
str or `x`
Either the formatted time object or the initial `x`
### Response:
def format_time(x):
"""Formats date values
This function formats :class:`datetime.datetime` and
:class:`datetime.timedelta` objects (and the corresponding numpy objects)
using the :func:`xarray.core.formatting.format_timestamp` and the
:func:`xarray.core.formatting.format_timedelta` functions.
Parameters
----------
x: object
The value to format. If not a time object, the value is returned
Returns
-------
str or `x`
Either the formatted time object or the initial `x`"""
if isinstance(x, (datetime64, datetime)):
return format_timestamp(x)
elif isinstance(x, (timedelta64, timedelta)):
return format_timedelta(x)
elif isinstance(x, ndarray):
return list(x) if x.ndim else x[()]
return x |
def copy(self: BaseBoardT) -> BaseBoardT:
"""Creates a copy of the board."""
board = type(self)(None)
board.pawns = self.pawns
board.knights = self.knights
board.bishops = self.bishops
board.rooks = self.rooks
board.queens = self.queens
board.kings = self.kings
board.occupied_co[WHITE] = self.occupied_co[WHITE]
board.occupied_co[BLACK] = self.occupied_co[BLACK]
board.occupied = self.occupied
board.promoted = self.promoted
return board | Creates a copy of the board. | Below is the the instruction that describes the task:
### Input:
Creates a copy of the board.
### Response:
def copy(self: BaseBoardT) -> BaseBoardT:
"""Creates a copy of the board."""
board = type(self)(None)
board.pawns = self.pawns
board.knights = self.knights
board.bishops = self.bishops
board.rooks = self.rooks
board.queens = self.queens
board.kings = self.kings
board.occupied_co[WHITE] = self.occupied_co[WHITE]
board.occupied_co[BLACK] = self.occupied_co[BLACK]
board.occupied = self.occupied
board.promoted = self.promoted
return board |
def Convert(self, metadata, config, token=None):
"""Converts DNSClientConfiguration to ExportedDNSClientConfiguration."""
result = ExportedDNSClientConfiguration(
metadata=metadata,
dns_servers=" ".join(config.dns_server),
dns_suffixes=" ".join(config.dns_suffix))
yield result | Converts DNSClientConfiguration to ExportedDNSClientConfiguration. | Below is the the instruction that describes the task:
### Input:
Converts DNSClientConfiguration to ExportedDNSClientConfiguration.
### Response:
def Convert(self, metadata, config, token=None):
"""Converts DNSClientConfiguration to ExportedDNSClientConfiguration."""
result = ExportedDNSClientConfiguration(
metadata=metadata,
dns_servers=" ".join(config.dns_server),
dns_suffixes=" ".join(config.dns_suffix))
yield result |
def get_programs():
"""Returns a generator that yields the available executable programs
:returns: a generator that yields the programs available after a refresh_listing()
:rtype: generator
"""
programs = []
os.environ['PATH'] += os.pathsep + os.getcwd()
for p in os.environ['PATH'].split(os.pathsep):
if path.isdir(p):
for f in os.listdir(p):
if _is_executable(path.join(p, f)):
yield f | Returns a generator that yields the available executable programs
:returns: a generator that yields the programs available after a refresh_listing()
:rtype: generator | Below is the the instruction that describes the task:
### Input:
Returns a generator that yields the available executable programs
:returns: a generator that yields the programs available after a refresh_listing()
:rtype: generator
### Response:
def get_programs():
"""Returns a generator that yields the available executable programs
:returns: a generator that yields the programs available after a refresh_listing()
:rtype: generator
"""
programs = []
os.environ['PATH'] += os.pathsep + os.getcwd()
for p in os.environ['PATH'].split(os.pathsep):
if path.isdir(p):
for f in os.listdir(p):
if _is_executable(path.join(p, f)):
yield f |
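A quick usage sketch; the exact listing depends on the local PATH and the current working directory:
# Print every executable on PATH whose name starts with "py"
for prog in get_programs():
    if prog.startswith("py"):
        print(prog)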
def islink(path):
'''
Equivalent to os.path.islink()
'''
if six.PY3 or not salt.utils.platform.is_windows():
return os.path.islink(path)
if not HAS_WIN32FILE:
log.error('Cannot check if %s is a link, missing required modules', path)
if not _is_reparse_point(path):
return False
# check that it is a symlink reparse point (in case it is something else,
# like a mount point)
reparse_data = _get_reparse_data(path)
# sanity check - this should not happen
if not reparse_data:
# not a reparse point
return False
# REPARSE_DATA_BUFFER structure - see
# http://msdn.microsoft.com/en-us/library/ff552012.aspx
# parse the structure header to work out which type of reparse point this is
header_parser = struct.Struct('L')
ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
if not ReparseTag & 0xA000FFFF == 0xA000000C:
return False
else:
return True | Equivalent to os.path.islink() | Below is the the instruction that describes the task:
### Input:
Equivalent to os.path.islink()
### Response:
def islink(path):
'''
Equivalent to os.path.islink()
'''
if six.PY3 or not salt.utils.platform.is_windows():
return os.path.islink(path)
if not HAS_WIN32FILE:
log.error('Cannot check if %s is a link, missing required modules', path)
if not _is_reparse_point(path):
return False
# check that it is a symlink reparse point (in case it is something else,
# like a mount point)
reparse_data = _get_reparse_data(path)
# sanity check - this should not happen
if not reparse_data:
# not a reparse point
return False
# REPARSE_DATA_BUFFER structure - see
# http://msdn.microsoft.com/en-us/library/ff552012.aspx
# parse the structure header to work out which type of reparse point this is
header_parser = struct.Struct('L')
ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
if not ReparseTag & 0xA000FFFF == 0xA000000C:
return False
else:
return True |
def cp(src, dst, overwrite=False):
"""
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: If the file already exists, should I overwrite it?
"""
if not isinstance(src, list):
src = [src]
dst = os.path.expanduser(dst)
dst_folder = os.path.isdir(dst)
if len(src) > 1 and not dst_folder:
raise OSError("Cannot copy multiple item to same file")
for item in src:
source = os.path.expanduser(item)
destination = (dst if not dst_folder else
os.path.join(dst, os.path.basename(source)))
if not overwrite and os.path.exists(destination):
_logger.warning("Not replacing {0} with {1}, overwrite not enabled"
"".format(destination, source))
continue
shutil.copy(source, destination) | Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: IF the file already exists, should I overwrite it? | Below is the the instruction that describes the task:
### Input:
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: If the file already exists, should I overwrite it?
### Response:
def cp(src, dst, overwrite=False):
"""
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: If the file already exists, should I overwrite it?
"""
if not isinstance(src, list):
src = [src]
dst = os.path.expanduser(dst)
dst_folder = os.path.isdir(dst)
if len(src) > 1 and not dst_folder:
raise OSError("Cannot copy multiple item to same file")
for item in src:
source = os.path.expanduser(item)
destination = (dst if not dst_folder else
os.path.join(dst, os.path.basename(source)))
if not overwrite and os.path.exists(destination):
_logger.warning("Not replacing {0} with {1}, overwrite not enabled"
"".format(destination, source))
continue
shutil.copy(source, destination) |
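A short usage sketch for cp() above; the paths are made up for illustration:
# Illustrative only: copy two files into an existing backup directory, skipping
# any that are already there, then force-overwrite a single renamed copy.
cp(["~/notes.txt", "~/todo.txt"], "~/backup")
cp("~/notes.txt", "~/notes_copy.txt", overwrite=True)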
def choices(cls, blank=False):
""" Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
"""
choices = sorted([(key, value) for key, value in cls.values.items()], key=lambda x: x[0])
if blank:
choices.insert(0, ('', Enum.Value('', None, '', cls)))
return choices | Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list | Below is the the instruction that describes the task:
### Input:
Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
### Response:
def choices(cls, blank=False):
""" Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
"""
choices = sorted([(key, value) for key, value in cls.values.items()], key=lambda x: x[0])
if blank:
choices.insert(0, ('', Enum.Value('', None, '', cls)))
return choices |
def states(self):
"""Returns a set containing the enabled states."""
state_list = []
for state in States:
if state.value & self._states != 0:
state_list.append(state)
if (self._flashing_states & States.FILTER) != 0:
state_list.append(States.FILTER_LOW_SPEED)
return state_list | Returns a set containing the enabled states. | Below is the the instruction that describes the task:
### Input:
Returns a set containing the enabled states.
### Response:
def states(self):
"""Returns a set containing the enabled states."""
state_list = []
for state in States:
if state.value & self._states != 0:
state_list.append(state)
if (self._flashing_states & States.FILTER) != 0:
state_list.append(States.FILTER_LOW_SPEED)
return state_list |
def next(self):
"""
Reads the next dataset row.
:return: the next row
:rtype: Instance
"""
if not self.__has_more():
raise StopIteration()
else:
return javabridge.get_env().get_string(self.__next()) | Reads the next dataset row.
:return: the next row
:rtype: Instance | Below is the the instruction that describes the task:
### Input:
Reads the next dataset row.
:return: the next row
:rtype: Instance
### Response:
def next(self):
"""
Reads the next dataset row.
:return: the next row
:rtype: Instance
"""
if not self.__has_more():
raise StopIteration()
else:
return javabridge.get_env().get_string(self.__next()) |
def xirr(values, dates, guess=0):
"""
Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR.
"""
if isinstance(values, Range):
values = values.values
if isinstance(dates, Range):
dates = dates.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
else:
try:
return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
except RuntimeError: # Failed to converge?
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10) | Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR. | Below is the the instruction that describes the task:
### Input:
Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR.
### Response:
def xirr(values, dates, guess=0):
"""
Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR.
"""
if isinstance(values, Range):
values = values.values
if isinstance(dates, Range):
dates = dates.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
else:
try:
return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
except RuntimeError: # Failed to converge?
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10) |
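A hedged usage sketch for xirr() above; the cash flows and Excel serial dates below are invented illustration values, not taken from the original source:
# Illustrative only: one outflow followed by three inflows on irregular dates.
values = [-10000, 2750, 4250, 3250]
dates = [43466, 43556, 43647, 43830]    # Excel serial dates during 2019
rate = xirr(values, dates)              # plain float return value
print(round(rate, 4))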
def to_price_index(returns, start=100):
"""
Returns a price index given a series of returns.
Args:
* returns: Expects a return series
* start (number): Starting level
Assumes arithmetic returns.
Formula is: cumprod (1+r)
"""
return (returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start | Returns a price index given a series of returns.
Args:
* returns: Expects a return series
* start (number): Starting level
Assumes arithmetic returns.
Formula is: cumprod (1+r) | Below is the the instruction that describes the task:
### Input:
Returns a price index given a series of returns.
Args:
* returns: Expects a return series
* start (number): Starting level
Assumes arithmetic returns.
Formula is: cumprod (1+r)
### Response:
def to_price_index(returns, start=100):
"""
Returns a price index given a series of returns.
Args:
* returns: Expects a return series
* start (number): Starting level
Assumes arithmetic returns.
Formula is: cumprod (1+r)
"""
return (returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start |
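A short usage sketch for to_price_index() above, assuming pandas is available:
# Illustrative only: turn a small return series into an index starting at 100.
import pandas as pd
returns = pd.Series([0.01, -0.02, 0.03, float("nan")])
print(to_price_index(returns, start=100))
# Missing returns are treated as 0, so the index stays flat over those periods.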
def get_user_subscription_to_discussion(recID, uid):
"""
Returns the type of subscription for the given user to this
discussion. This does not check authorizations (for eg. if user
was subscribed, but is suddenly no longer authorized).
:param recID: record ID
:param uid: user id
:return:
- 0 if user is not subscribed to discussion
- 1 if user is subscribed, and is allowed to unsubscribe
- 2 if user is subscribed, but cannot unsubscribe
"""
user_email = User.query.get(uid).email
(emails1, emails2) = get_users_subscribed_to_discussion(
recID, check_authorizations=False)
if user_email in emails1:
return 1
elif user_email in emails2:
return 2
else:
return 0 | Returns the type of subscription for the given user to this
discussion. This does not check authorizations (for eg. if user
was subscribed, but is suddenly no longer authorized).
:param recID: record ID
:param uid: user id
:return:
- 0 if user is not subscribed to discussion
- 1 if user is subscribed, and is allowed to unsubscribe
- 2 if user is subscribed, but cannot unsubscribe | Below is the the instruction that describes the task:
### Input:
Returns the type of subscription for the given user to this
discussion. This does not check authorizations (for eg. if user
was subscribed, but is suddenly no longer authorized).
:param recID: record ID
:param uid: user id
:return:
- 0 if user is not subscribed to discussion
- 1 if user is subscribed, and is allowed to unsubscribe
- 2 if user is subscribed, but cannot unsubscribe
### Response:
def get_user_subscription_to_discussion(recID, uid):
"""
Returns the type of subscription for the given user to this
discussion. This does not check authorizations (for eg. if user
was subscribed, but is suddenly no longer authorized).
:param recID: record ID
:param uid: user id
:return:
- 0 if user is not subscribed to discussion
- 1 if user is subscribed, and is allowed to unsubscribe
- 2 if user is subscribed, but cannot unsubscribe
"""
user_email = User.query.get(uid).email
(emails1, emails2) = get_users_subscribed_to_discussion(
recID, check_authorizations=False)
if user_email in emails1:
return 1
elif user_email in emails2:
return 2
else:
return 0 |
def conv_uuid(self, column, name, **kwargs):
"""Convert UUID filter."""
return [f(column, name, **kwargs) for f in self.uuid_filters] | Convert UUID filter. | Below is the the instruction that describes the task:
### Input:
Convert UUID filter.
### Response:
def conv_uuid(self, column, name, **kwargs):
"""Convert UUID filter."""
return [f(column, name, **kwargs) for f in self.uuid_filters] |
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): The dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name | Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): The dependency that needs to be imported.
Returns:
str or None | Below is the the instruction that describes the task:
### Input:
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): The dependency that needs to be imported.
Returns:
str or None
### Response:
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): The dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name |
def get(self, index):
"""
Get the element by index. If index is out of bounds for
the internal list, None is returned. Indexes cannot be
negative.
:param int index: retrieve element by positive index in list
:rtype: SubElement or None
"""
if self and (index <= len(self) -1):
return self._result_cache[index] | Get the element by index. If index is out of bounds for
the internal list, None is returned. Indexes cannot be
negative.
:param int index: retrieve element by positive index in list
:rtype: SubElement or None | Below is the the instruction that describes the task:
### Input:
Get the element by index. If index is out of bounds for
the internal list, None is returned. Indexes cannot be
negative.
:param int index: retrieve element by positive index in list
:rtype: SubElement or None
### Response:
def get(self, index):
"""
Get the element by index. If index is out of bounds for
the internal list, None is returned. Indexes cannot be
negative.
:param int index: retrieve element by positive index in list
:rtype: SubElement or None
"""
if self and (index <= len(self) -1):
return self._result_cache[index] |
def heldout_log_likelihood(self, test_mask=None):
"""
Compute the log likelihood of the masked data given the latent
discrete and continuous states.
"""
if test_mask is None:
# If a test mask is not supplied, use the negation of this object's mask
if self.mask is None:
return 0
else:
test_mask = ~self.mask
xs = np.hstack((self.gaussian_states, self.inputs))
if self.single_emission:
return self.emission_distns[0].\
log_likelihood((xs, self.data), mask=test_mask).sum()
else:
hll = 0
z = self.stateseq
for idx, ed in enumerate(self.emission_distns):
hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
mask=test_mask[z == idx]).sum()
return hll | Compute the log likelihood of the masked data given the latent
discrete and continuous states. | Below is the the instruction that describes the task:
### Input:
Compute the log likelihood of the masked data given the latent
discrete and continuous states.
### Response:
def heldout_log_likelihood(self, test_mask=None):
"""
Compute the log likelihood of the masked data given the latent
discrete and continuous states.
"""
if test_mask is None:
# If a test mask is not supplied, use the negation of this object's mask
if self.mask is None:
return 0
else:
test_mask = ~self.mask
xs = np.hstack((self.gaussian_states, self.inputs))
if self.single_emission:
return self.emission_distns[0].\
log_likelihood((xs, self.data), mask=test_mask).sum()
else:
hll = 0
z = self.stateseq
for idx, ed in enumerate(self.emission_distns):
hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
mask=test_mask[z == idx]).sum()
return hll
def form(value):
"""
Format numbers in a nice way.
>>> form(0)
'0'
>>> form(0.0)
'0.0'
>>> form(0.0001)
'1.000E-04'
>>> form(1003.4)
'1,003'
>>> form(103.4)
'103'
>>> form(9.3)
'9.30000'
>>> form(-1.2)
'-1.2'
"""
if isinstance(value, FLOAT + INT):
if value <= 0:
return str(value)
elif value < .001:
return '%.3E' % value
elif value < 10 and isinstance(value, FLOAT):
return '%.5f' % value
elif value > 1000:
return '{:,d}'.format(int(round(value)))
elif numpy.isnan(value):
return 'NaN'
else: # in the range 10-1000
return str(int(value))
elif isinstance(value, bytes):
return decode(value)
elif isinstance(value, str):
return value
elif isinstance(value, numpy.object_):
return str(value)
elif hasattr(value, '__len__') and len(value) > 1:
return ' '.join(map(form, value))
return str(value) | Format numbers in a nice way.
>>> form(0)
'0'
>>> form(0.0)
'0.0'
>>> form(0.0001)
'1.000E-04'
>>> form(1003.4)
'1,003'
>>> form(103.4)
'103'
>>> form(9.3)
'9.30000'
>>> form(-1.2)
'-1.2' | Below is the the instruction that describes the task:
### Input:
Format numbers in a nice way.
>>> form(0)
'0'
>>> form(0.0)
'0.0'
>>> form(0.0001)
'1.000E-04'
>>> form(1003.4)
'1,003'
>>> form(103.4)
'103'
>>> form(9.3)
'9.30000'
>>> form(-1.2)
'-1.2'
### Response:
def form(value):
"""
Format numbers in a nice way.
>>> form(0)
'0'
>>> form(0.0)
'0.0'
>>> form(0.0001)
'1.000E-04'
>>> form(1003.4)
'1,003'
>>> form(103.4)
'103'
>>> form(9.3)
'9.30000'
>>> form(-1.2)
'-1.2'
"""
if isinstance(value, FLOAT + INT):
if value <= 0:
return str(value)
elif value < .001:
return '%.3E' % value
elif value < 10 and isinstance(value, FLOAT):
return '%.5f' % value
elif value > 1000:
return '{:,d}'.format(int(round(value)))
elif numpy.isnan(value):
return 'NaN'
else: # in the range 10-1000
return str(int(value))
elif isinstance(value, bytes):
return decode(value)
elif isinstance(value, str):
return value
elif isinstance(value, numpy.object_):
return str(value)
elif hasattr(value, '__len__') and len(value) > 1:
return ' '.join(map(form, value))
return str(value) |
def police_priority_map_conform_map_pri1_conform(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
conform = ET.SubElement(police_priority_map, "conform")
map_pri1_conform = ET.SubElement(conform, "map-pri1-conform")
map_pri1_conform.text = kwargs.pop('map_pri1_conform')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def police_priority_map_conform_map_pri1_conform(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
conform = ET.SubElement(police_priority_map, "conform")
map_pri1_conform = ET.SubElement(conform, "map-pri1-conform")
map_pri1_conform.text = kwargs.pop('map_pri1_conform')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def load_preset(self):
"""
Loads preset if it is specified in the .frigg.yml
"""
if 'preset' in self.settings.preview:
with open(os.path.join(os.path.dirname(__file__), 'presets.yaml')) as f:
presets = yaml.load(f.read())
if self.settings.preview['preset'] in presets:
self.preset = presets[self.settings.preview['preset']]
return self.preset | Loads preset if it is specified in the .frigg.yml | Below is the the instruction that describes the task:
### Input:
Loads preset if it is specified in the .frigg.yml
### Response:
def load_preset(self):
"""
Loads preset if it is specified in the .frigg.yml
"""
if 'preset' in self.settings.preview:
with open(os.path.join(os.path.dirname(__file__), 'presets.yaml')) as f:
presets = yaml.load(f.read())
if self.settings.preview['preset'] in presets:
self.preset = presets[self.settings.preview['preset']]
return self.preset |
def summary(self):
'''
Create a dictionary with a summary of the updates in the collection.
Returns:
dict: Summary of the contents of the collection
.. code-block:: cfg
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.summary()
'''
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
if self.count() == 0:
return 'Nothing to return'
# Build a dictionary containing a summary of updates available
results = {'Total': 0,
'Available': 0,
'Downloaded': 0,
'Installed': 0,
'Categories': {},
'Severity': {}}
for update in self.updates:
# Count the total number of updates available
results['Total'] += 1
# Updates available for download
if not salt.utils.data.is_true(update.IsDownloaded) \
and not salt.utils.data.is_true(update.IsInstalled):
results['Available'] += 1
# Updates downloaded awaiting install
if salt.utils.data.is_true(update.IsDownloaded) \
and not salt.utils.data.is_true(update.IsInstalled):
results['Downloaded'] += 1
# Updates installed
if salt.utils.data.is_true(update.IsInstalled):
results['Installed'] += 1
# Add Categories and increment total for each one
# The sum will be more than the total because each update can have
# multiple categories
for category in update.Categories:
if category.Name in results['Categories']:
results['Categories'][category.Name] += 1
else:
results['Categories'][category.Name] = 1
# Add Severity Summary
if update.MsrcSeverity:
if update.MsrcSeverity in results['Severity']:
results['Severity'][update.MsrcSeverity] += 1
else:
results['Severity'][update.MsrcSeverity] = 1
return results | Create a dictionary with a summary of the updates in the collection.
Returns:
dict: Summary of the contents of the collection
.. code-block:: cfg
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.summary() | Below is the the instruction that describes the task:
### Input:
Create a dictionary with a summary of the updates in the collection.
Returns:
dict: Summary of the contents of the collection
.. code-block:: cfg
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.summary()
### Response:
def summary(self):
'''
Create a dictionary with a summary of the updates in the collection.
Returns:
dict: Summary of the contents of the collection
.. code-block:: cfg
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.summary()
'''
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
if self.count() == 0:
return 'Nothing to return'
# Build a dictionary containing a summary of updates available
results = {'Total': 0,
'Available': 0,
'Downloaded': 0,
'Installed': 0,
'Categories': {},
'Severity': {}}
for update in self.updates:
# Count the total number of updates available
results['Total'] += 1
# Updates available for download
if not salt.utils.data.is_true(update.IsDownloaded) \
and not salt.utils.data.is_true(update.IsInstalled):
results['Available'] += 1
# Updates downloaded awaiting install
if salt.utils.data.is_true(update.IsDownloaded) \
and not salt.utils.data.is_true(update.IsInstalled):
results['Downloaded'] += 1
# Updates installed
if salt.utils.data.is_true(update.IsInstalled):
results['Installed'] += 1
# Add Categories and increment total for each one
# The sum will be more than the total because each update can have
# multiple categories
for category in update.Categories:
if category.Name in results['Categories']:
results['Categories'][category.Name] += 1
else:
results['Categories'][category.Name] = 1
# Add Severity Summary
if update.MsrcSeverity:
if update.MsrcSeverity in results['Severity']:
results['Severity'][update.MsrcSeverity] += 1
else:
results['Severity'][update.MsrcSeverity] = 1
return results |
def handle_subscribe(self, request, path):
# type: (Subscribe, List[str]) -> CallbackResponses
"""Add to the list of request to notify, and notify the initial value of
the data held
Args:
request (Subscribe): The subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called
"""
ret = []
if path:
# Recurse down
name = path[0]
if name not in self.children:
self.children[name] = NotifierNode(
getattr(self.data, name, None), self)
ret += self.children[name].handle_subscribe(request, path[1:])
else:
# This is for us
serialized = serialize_object(self.data)
if request.delta:
self.delta_requests.append(request)
ret.append(request.delta_response([[[], serialized]]))
else:
self.update_requests.append(request)
ret.append(request.update_response(serialized))
return ret | Add to the list of request to notify, and notify the initial value of
the data held
Args:
request (Subscribe): The subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called | Below is the the instruction that describes the task:
### Input:
Add to the list of request to notify, and notify the initial value of
the data held
Args:
request (Subscribe): The subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called
### Response:
def handle_subscribe(self, request, path):
# type: (Subscribe, List[str]) -> CallbackResponses
"""Add to the list of request to notify, and notify the initial value of
the data held
Args:
request (Subscribe): The subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called
"""
ret = []
if path:
# Recurse down
name = path[0]
if name not in self.children:
self.children[name] = NotifierNode(
getattr(self.data, name, None), self)
ret += self.children[name].handle_subscribe(request, path[1:])
else:
# This is for us
serialized = serialize_object(self.data)
if request.delta:
self.delta_requests.append(request)
ret.append(request.delta_response([[[], serialized]]))
else:
self.update_requests.append(request)
ret.append(request.update_response(serialized))
return ret |
def has_image(self, name: str) -> bool:
"""
Determines whether the server has a Docker image with a given name.
"""
path = "docker/images/{}".format(name)
r = self.__api.head(path)
if r.status_code == 204:
return True
elif r.status_code == 404:
return False
self.__api.handle_erroneous_response(r) | Determines whether the server has a Docker image with a given name. | Below is the the instruction that describes the task:
### Input:
Determines whether the server has a Docker image with a given name.
### Response:
def has_image(self, name: str) -> bool:
"""
Determines whether the server has a Docker image with a given name.
"""
path = "docker/images/{}".format(name)
r = self.__api.head(path)
if r.status_code == 204:
return True
elif r.status_code == 404:
return False
self.__api.handle_erroneous_response(r) |
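A hedged usage sketch for has_image() above; `client` is a hypothetical object exposing this method, and the image name is made up:
# Illustrative only.
if client.has_image("ubuntu:16.04"):
    print("image already present on the server")
else:
    print("image missing; build or pull it first")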
def _Close(self):
"""Closes the file-like object."""
# pylint: disable=protected-access
super(RawFile, self)._Close()
for file_object in self._file_objects:
file_object.close()
self._file_objects = [] | Closes the file-like object. | Below is the the instruction that describes the task:
### Input:
Closes the file-like object.
### Response:
def _Close(self):
"""Closes the file-like object."""
# pylint: disable=protected-access
super(RawFile, self)._Close()
for file_object in self._file_objects:
file_object.close()
self._file_objects = [] |
def is_valid_port(instance: int):
"""Validates data is a valid port"""
if not isinstance(instance, (int, str)):
return True
return int(instance) in range(65535) | Validates data is a valid port | Below is the the instruction that describes the task:
### Input:
Validates data is a valid port
### Response:
def is_valid_port(instance: int):
"""Validates data is a valid port"""
if not isinstance(instance, (int, str)):
return True
return int(instance) in range(65535) |
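A short usage sketch for is_valid_port() above, showing how non int/str input is skipped and numeric input is range-checked:
# Illustrative only.
print(is_valid_port(8080))      # True: inside range(65535)
print(is_valid_port("70000"))   # False: out of range
print(is_valid_port(None))      # True: non int/str values are not validated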
def substitute_environ(self):
"""
Substitute environment variables into settings.
"""
for attr_name in dir(self):
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
orig_value = getattr(self, attr_name)
is_required = isinstance(orig_value, Required)
orig_type = orig_value.v_type if is_required else type(orig_value)
env_var_name = self._ENV_PREFIX + attr_name
env_var = os.getenv(env_var_name, None)
if env_var is not None:
if issubclass(orig_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(orig_type, int):
env_var = int(env_var)
elif issubclass(orig_type, Path):
env_var = Path(env_var)
elif issubclass(orig_type, bytes):
env_var = env_var.encode()
# could do floats here and lists etc via json
setattr(self, attr_name, env_var)
elif is_required and attr_name not in self._custom_settings:
raise RuntimeError('The required environment variable "{0}" is currently not set, '
'you\'ll need to run `source activate.settings.sh` '
'or you can set that single environment variable with '
'`export {0}="<value>"`'.format(env_var_name)) | Substitute environment variables into settings. | Below is the the instruction that describes the task:
### Input:
Substitute environment variables into settings.
### Response:
def substitute_environ(self):
"""
Substitute environment variables into settings.
"""
for attr_name in dir(self):
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
orig_value = getattr(self, attr_name)
is_required = isinstance(orig_value, Required)
orig_type = orig_value.v_type if is_required else type(orig_value)
env_var_name = self._ENV_PREFIX + attr_name
env_var = os.getenv(env_var_name, None)
if env_var is not None:
if issubclass(orig_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(orig_type, int):
env_var = int(env_var)
elif issubclass(orig_type, Path):
env_var = Path(env_var)
elif issubclass(orig_type, bytes):
env_var = env_var.encode()
# could do floats here and lists etc via json
setattr(self, attr_name, env_var)
elif is_required and attr_name not in self._custom_settings:
raise RuntimeError('The required environment variable "{0}" is currently not set, '
'you\'ll need to run `source activate.settings.sh` '
'or you can set that single environment variable with '
'`export {0}="<value>"`'.format(env_var_name)) |
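A hedged sketch of how substitute_environ() above behaves; the Settings subclass, its _ENV_PREFIX of "APP_", and the attribute names are all hypothetical:
# Illustrative only: assumes a Settings class that defines DEBUG = False and
# PORT = 8000 and calls substitute_environ() during construction.
import os
os.environ["APP_DEBUG"] = "true"    # bool attributes accept 1/TRUE
os.environ["APP_PORT"] = "9000"     # int attributes are parsed with int()
settings = Settings()               # hypothetical subclass instance
print(settings.DEBUG, settings.PORT)  # -> True 9000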
def toolchain_spec_prepare_loaderplugins(
toolchain, spec,
loaderplugin_read_key,
handler_sourcepath_key,
loaderplugin_sourcepath_map_key=LOADERPLUGIN_SOURCEPATH_MAPS):
"""
A standard helper function for combining the filtered (e.g. using
``spec_update_sourcepath_filter_loaderplugins``) loaderplugin
sourcepath mappings back into one that is usable with the standard
``toolchain_spec_compile_entries`` function.
Arguments:
toolchain
The toolchain
spec
The spec
loaderplugin_read_key
The read_key associated with the loaderplugin process as set up
for the Toolchain that implemented this. If the toolchain has
this in its compile_entries:
ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink')
The loaderplugin_read_key it must use will be 'plugsrc'.
handler_sourcepath_key
All found handlers will have their handler_sourcepath method be
invoked, and the combined results will be a dict stored in the
spec under that key.
loaderplugin_sourcepath_map_key
It must be the same key to the value produced by
``spec_update_sourcepath_filter_loaderplugins``
"""
# ensure the registry is applied to the spec
registry = spec_update_loaderplugin_registry(
spec, default=toolchain.loaderplugin_registry)
# this one is named like so for the compile entry method
plugin_sourcepath = dict_setget_dict(
spec, loaderplugin_read_key + '_sourcepath')
# the key is supplied by the toolchain that might make use of this
if handler_sourcepath_key:
handler_sourcepath = dict_setget_dict(spec, handler_sourcepath_key)
else:
# provide a null value for this.
handler_sourcepath = {}
for key, value in spec.get(loaderplugin_sourcepath_map_key, {}).items():
handler = registry.get(key)
if handler:
# assume handler will do the job.
logger.debug("found handler for '%s' loader plugin", key)
plugin_sourcepath.update(value)
logger.debug(
"plugin_sourcepath updated with %d keys", len(value))
# TODO figure out how to address the case where the actual
# JavaScript module for the handling wasn't found.
handler_sourcepath.update(
handler.generate_handler_sourcepath(toolchain, spec, value))
else:
logger.warning(
"loaderplugin handler for '%s' not found in loaderplugin "
"registry '%s'; as arguments associated with loader plugins "
"are specific, processing is disabled for this group; the "
"sources referenced by the following names will not be "
"compiled into the build target: %s",
key, registry.registry_name, sorted(value.keys()),
) | A standard helper function for combining the filtered (e.g. using
``spec_update_sourcepath_filter_loaderplugins``) loaderplugin
sourcepath mappings back into one that is usable with the standard
``toolchain_spec_compile_entries`` function.
Arguments:
toolchain
The toolchain
spec
The spec
loaderplugin_read_key
The read_key associated with the loaderplugin process as set up
for the Toolchain that implemented this. If the toolchain has
this in its compile_entries:
ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink')
The loaderplugin_read_key it must use will be 'plugsrc'.
handler_sourcepath_key
All found handlers will have their handler_sourcepath method be
invoked, and the combined results will be a dict stored in the
spec under that key.
loaderplugin_sourcepath_map_key
It must be the same key to the value produced by
``spec_update_sourcepath_filter_loaderplugins`` | Below is the the instruction that describes the task:
### Input:
A standard helper function for combining the filtered (e.g. using
``spec_update_sourcepath_filter_loaderplugins``) loaderplugin
sourcepath mappings back into one that is usable with the standard
``toolchain_spec_compile_entries`` function.
Arguments:
toolchain
The toolchain
spec
The spec
loaderplugin_read_key
The read_key associated with the loaderplugin process as set up
for the Toolchain that implemented this. If the toolchain has
this in its compile_entries:
ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink')
The loaderplugin_read_key it must use will be 'plugsrc'.
handler_sourcepath_key
All found handlers will have their handler_sourcepath method be
invoked, and the combined results will be a dict stored in the
spec under that key.
loaderplugin_sourcepath_map_key
It must be the same key to the value produced by
``spec_update_sourcepath_filter_loaderplugins``
### Response:
def toolchain_spec_prepare_loaderplugins(
toolchain, spec,
loaderplugin_read_key,
handler_sourcepath_key,
loaderplugin_sourcepath_map_key=LOADERPLUGIN_SOURCEPATH_MAPS):
"""
A standard helper function for combining the filtered (e.g. using
``spec_update_sourcepath_filter_loaderplugins``) loaderplugin
sourcepath mappings back into one that is usable with the standard
``toolchain_spec_compile_entries`` function.
Arguments:
toolchain
The toolchain
spec
The spec
loaderplugin_read_key
The read_key associated with the loaderplugin process as set up
for the Toolchain that implemented this. If the toolchain has
this in its compile_entries:
ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink')
The loaderplugin_read_key it must use will be 'plugsrc'.
handler_sourcepath_key
All found handlers will have their handler_sourcepath method be
invoked, and the combined results will be a dict stored in the
spec under that key.
loaderplugin_sourcepath_map_key
It must be the same key to the value produced by
``spec_update_sourcepath_filter_loaderplugins``
"""
# ensure the registry is applied to the spec
registry = spec_update_loaderplugin_registry(
spec, default=toolchain.loaderplugin_registry)
# this one is named like so for the compile entry method
plugin_sourcepath = dict_setget_dict(
spec, loaderplugin_read_key + '_sourcepath')
# the key is supplied by the toolchain that might make use of this
if handler_sourcepath_key:
handler_sourcepath = dict_setget_dict(spec, handler_sourcepath_key)
else:
# provide a null value for this.
handler_sourcepath = {}
for key, value in spec.get(loaderplugin_sourcepath_map_key, {}).items():
handler = registry.get(key)
if handler:
# assume handler will do the job.
logger.debug("found handler for '%s' loader plugin", key)
plugin_sourcepath.update(value)
logger.debug(
"plugin_sourcepath updated with %d keys", len(value))
# TODO figure out how to address the case where the actual
# JavaScript module for the handling wasn't found.
handler_sourcepath.update(
handler.generate_handler_sourcepath(toolchain, spec, value))
else:
logger.warning(
"loaderplugin handler for '%s' not found in loaderplugin "
"registry '%s'; as arguments associated with loader plugins "
"are specific, processing is disabled for this group; the "
"sources referenced by the following names will not be "
"compiled into the build target: %s",
key, registry.registry_name, sorted(value.keys()),
) |
def _iter_qs(self):
"""Iterate over the Q values
"""
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes # pylint: disable=invalid-name
for i in xrange(nplanes):
yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5)) | Iterate over the Q values | Below is the the instruction that describes the task:
### Input:
Iterate over the Q values
### Response:
def _iter_qs(self):
"""Iterate over the Q values
"""
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes # pylint: disable=invalid-name
for i in xrange(nplanes):
yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5)) |
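A self-contained sketch of the same Q-plane spacing computed by _iter_qs() above, with qrange and deltam passed in explicitly instead of read from self:
# Illustrative only: standalone restatement of the tiling logic.
from math import ceil, exp, log
def iter_qs(qrange, deltam):
    cumum = log(qrange[1] / qrange[0]) / 2 ** (1 / 2.)
    nplanes = int(max(ceil(cumum / deltam), 1))
    dq = cumum / nplanes
    for i in range(nplanes):
        yield qrange[0] * exp(2 ** (1 / 2.) * dq * (i + .5))
print(list(iter_qs((4, 64), 0.2)))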
def get(self,
singleExposure=False):
"""
*get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring
"""
self.log.info('starting the ``get`` method')
if singleExposure:
batchSize = 1
else:
batchSize = int(self.settings["orbfit"]["batch size"])
exposureCount = 1
while exposureCount > 0:
expsoureObjects, astorbString, exposureCount = self._get_exposures_requiring_orbfit_positions(
batchSize=batchSize)
if exposureCount:
orbfitPositions = self._get_orbfit_positions(
expsoureObjects, astorbString)
self._add_orbfit_eph_to_database(
orbfitPositions, expsoureObjects)
if singleExposure:
exposureCount = 0
self.log.info('completed the ``get`` method')
return None | *get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring | Below is the the instruction that describes the task:
### Input:
*get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring
### Response:
def get(self,
singleExposure=False):
"""
*get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring
"""
self.log.info('starting the ``get`` method')
if singleExposure:
batchSize = 1
else:
batchSize = int(self.settings["orbfit"]["batch size"])
exposureCount = 1
while exposureCount > 0:
expsoureObjects, astorbString, exposureCount = self._get_exposures_requiring_orbfit_positions(
batchSize=batchSize)
if exposureCount:
orbfitPositions = self._get_orbfit_positions(
expsoureObjects, astorbString)
self._add_orbfit_eph_to_database(
orbfitPositions, expsoureObjects)
if singleExposure:
exposureCount = 0
self.log.info('completed the ``get`` method')
return None |
def getFixedStar(ID, jd):
""" Returns a fixed star. """
star = swe.sweFixedStar(ID, jd)
_signInfo(star)
return star | Returns a fixed star. | Below is the the instruction that describes the task:
### Input:
Returns a fixed star.
### Response:
def getFixedStar(ID, jd):
""" Returns a fixed star. """
star = swe.sweFixedStar(ID, jd)
_signInfo(star)
return star |
def _calc_lm_step(self, damped_JTJ, grad, subblock=None):
"""Calculates a Levenberg-Marquard step w/o acceleration"""
delta0, res, rank, s = np.linalg.lstsq(damped_JTJ, -0.5*grad,
rcond=self.min_eigval)
if self._fresh_JTJ:
CLOG.debug('%d degenerate of %d total directions' % (
delta0.size-rank, delta0.size))
if subblock is not None:
delta = np.zeros(self.J.shape[0])
delta[subblock] = delta0
else:
delta = delta0.copy()
return delta | Calculates a Levenberg-Marquardt step w/o acceleration | Below is the the instruction that describes the task:
### Input:
Calculates a Levenberg-Marquardt step w/o acceleration
### Response:
def _calc_lm_step(self, damped_JTJ, grad, subblock=None):
"""Calculates a Levenberg-Marquard step w/o acceleration"""
delta0, res, rank, s = np.linalg.lstsq(damped_JTJ, -0.5*grad,
rcond=self.min_eigval)
if self._fresh_JTJ:
CLOG.debug('%d degenerate of %d total directions' % (
delta0.size-rank, delta0.size))
if subblock is not None:
delta = np.zeros(self.J.shape[0])
delta[subblock] = delta0
else:
delta = delta0.copy()
return delta |
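A self-contained numpy sketch of the damped least-squares solve at the heart of _calc_lm_step() above, using a toy 2x2 system:
# Illustrative only.
import numpy as np
damped_JTJ = np.array([[4.0, 1.0], [1.0, 3.0]]) + 1e-3 * np.eye(2)
grad = np.array([1.0, -2.0])
delta, res, rank, svals = np.linalg.lstsq(damped_JTJ, -0.5 * grad, rcond=None)
print(delta, rank)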
def parse_relation(obj: dict) -> BioCRelation:
"""Deserialize a dict obj to a BioCRelation object"""
rel = BioCRelation()
rel.id = obj['id']
rel.infons = obj['infons']
for node in obj['nodes']:
rel.add_node(BioCNode(node['refid'], node['role']))
return rel | Deserialize a dict obj to a BioCRelation object | Below is the the instruction that describes the task:
### Input:
Deserialize a dict obj to a BioCRelation object
### Response:
def parse_relation(obj: dict) -> BioCRelation:
"""Deserialize a dict obj to a BioCRelation object"""
rel = BioCRelation()
rel.id = obj['id']
rel.infons = obj['infons']
for node in obj['nodes']:
rel.add_node(BioCNode(node['refid'], node['role']))
return rel |
def cancel_bbuild(self, build_execution_configuration_id, **kwargs):
"""
Cancel the build execution defined with given executionConfigurationId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
else:
(data) = self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
return data | Cancel the build execution defined with given executionConfigurationId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Cancel the build execution defined with given executionConfigurationId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def cancel_bbuild(self, build_execution_configuration_id, **kwargs):
"""
Cancel the build execution defined with given executionConfigurationId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
else:
(data) = self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
return data |
def apply(self, a, return_Ya=False):
r"""Apply the projection to an array.
The computation is carried out without explicitly forming the
matrix corresponding to the projection (which would be an array with
``shape==(N,N)``).
See also :py:meth:`_apply`.
"""
# is projection the zero operator?
if self.V.shape[1] == 0:
Pa = numpy.zeros(a.shape)
if return_Ya:
return Pa, numpy.zeros((0, a.shape[1]))
return Pa
if return_Ya:
x, Ya = self._apply(a, return_Ya=return_Ya)
else:
x = self._apply(a)
for i in range(self.iterations-1):
z = a - x
w = self._apply(z)
x = x + w
if return_Ya:
return x, Ya
return x | r"""Apply the projection to an array.
The computation is carried out without explicitly forming the
matrix corresponding to the projection (which would be an array with
``shape==(N,N)``).
See also :py:meth:`_apply`. | Below is the the instruction that describes the task:
### Input:
r"""Apply the projection to an array.
The computation is carried out without explicitly forming the
matrix corresponding to the projection (which would be an array with
``shape==(N,N)``).
See also :py:meth:`_apply`.
### Response:
def apply(self, a, return_Ya=False):
r"""Apply the projection to an array.
The computation is carried out without explicitly forming the
matrix corresponding to the projection (which would be an array with
``shape==(N,N)``).
See also :py:meth:`_apply`.
"""
# is projection the zero operator?
if self.V.shape[1] == 0:
Pa = numpy.zeros(a.shape)
if return_Ya:
return Pa, numpy.zeros((0, a.shape[1]))
return Pa
if return_Ya:
x, Ya = self._apply(a, return_Ya=return_Ya)
else:
x = self._apply(a)
for i in range(self.iterations-1):
z = a - x
w = self._apply(z)
x = x + w
if return_Ya:
return x, Ya
return x |
def mpstatus_to_json(status):
'''Translate MPStatus in json string'''
msg_keys = list(status.msgs.keys())
data = '{'
for key in msg_keys[:-1]:
data += mavlink_to_json(status.msgs[key]) + ','
data += mavlink_to_json(status.msgs[msg_keys[-1]])
data += '}'
return data | Translate MPStatus in json string | Below is the the instruction that describes the task:
### Input:
Translate MPStatus in json string
### Response:
def mpstatus_to_json(status):
'''Translate MPStatus in json string'''
msg_keys = list(status.msgs.keys())
data = '{'
for key in msg_keys[:-1]:
data += mavlink_to_json(status.msgs[key]) + ','
data += mavlink_to_json(status.msgs[msg_keys[-1]])
data += '}'
return data |
def send_rsp_recv_cmd(self, target, data, timeout):
"""While operating as *target* send response *data* to the remote
device and return new command data if received within
*timeout* seconds.
"""
return super(Device, self).send_rsp_recv_cmd(target, data, timeout) | While operating as *target* send response *data* to the remote
device and return new command data if received within
*timeout* seconds. | Below is the the instruction that describes the task:
### Input:
While operating as *target* send response *data* to the remote
device and return new command data if received within
*timeout* seconds.
### Response:
def send_rsp_recv_cmd(self, target, data, timeout):
"""While operating as *target* send response *data* to the remote
device and return new command data if received within
*timeout* seconds.
"""
return super(Device, self).send_rsp_recv_cmd(target, data, timeout) |
def EndEdit(self, row, col, grid, oldVal=None):
"""
End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override*
"""
# Mirror our changes onto the main_window's code bar
self._tc.Unbind(wx.EVT_KEY_UP)
self.ApplyEdit(row, col, grid)
del self._col
del self._row
del self._grid | End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override* | Below is the the instruction that describes the task:
### Input:
End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override*
### Response:
def EndEdit(self, row, col, grid, oldVal=None):
"""
End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override*
"""
# Mirror our changes onto the main_window's code bar
self._tc.Unbind(wx.EVT_KEY_UP)
self.ApplyEdit(row, col, grid)
del self._col
del self._row
del self._grid |
def get_bucket_notification(self, bucket_name):
"""
Get notifications configured for the given bucket.
:param bucket_name: Bucket name.
"""
is_valid_bucket_name(bucket_name)
response = self._url_open(
"GET",
bucket_name=bucket_name,
query={"notification": ""},
)
data = response.data.decode('utf-8')
return parse_get_bucket_notification(data) | Get notifications configured for the given bucket.
:param bucket_name: Bucket name. | Below is the the instruction that describes the task:
### Input:
Get notifications configured for the given bucket.
:param bucket_name: Bucket name.
### Response:
def get_bucket_notification(self, bucket_name):
"""
Get notifications configured for the given bucket.
:param bucket_name: Bucket name.
"""
is_valid_bucket_name(bucket_name)
response = self._url_open(
"GET",
bucket_name=bucket_name,
query={"notification": ""},
)
data = response.data.decode('utf-8')
return parse_get_bucket_notification(data) |
def operation_name(operation, ns):
"""
Convert an operation, obj(s) pair into a swagger operation id.
For compatability with Bravado, we want to use underscores instead of dots and
verb-friendly names. Example:
foo.retrieve => client.foo.retrieve()
foo.search_for.bar => client.foo.search_for_bars()
"""
verb = operation.value.name
if ns.object_:
return "{}_{}".format(verb, pluralize(ns.object_name))
else:
return verb | Convert an operation, obj(s) pair into a swagger operation id.
For compatibility with Bravado, we want to use underscores instead of dots and
verb-friendly names. Example:
foo.retrieve => client.foo.retrieve()
foo.search_for.bar => client.foo.search_for_bars() | Below is the the instruction that describes the task:
### Input:
Convert an operation, obj(s) pair into a swagger operation id.
For compatibility with Bravado, we want to use underscores instead of dots and
verb-friendly names. Example:
foo.retrieve => client.foo.retrieve()
foo.search_for.bar => client.foo.search_for_bars()
### Response:
def operation_name(operation, ns):
"""
Convert an operation, obj(s) pair into a swagger operation id.
For compatability with Bravado, we want to use underscores instead of dots and
verb-friendly names. Example:
foo.retrieve => client.foo.retrieve()
foo.search_for.bar => client.foo.search_for_bars()
"""
verb = operation.value.name
if ns.object_:
return "{}_{}".format(verb, pluralize(ns.object_name))
else:
return verb |
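A hedged sketch of the naming rule implemented by operation_name() above; the namedtuple stand-ins and the trivial pluralize helper are fakes used only to show the shape of the result:
# Illustrative only: mimics "foo.search_for.bar => search_for_bars".
from collections import namedtuple
pluralize = lambda word: word + "s"          # stand-in for the real helper
Verb = namedtuple("Verb", ["name"])
Operation = namedtuple("Operation", ["value"])
NS = namedtuple("NS", ["object_", "object_name"])
op = Operation(Verb("search_for"))
ns = NS(object_=object(), object_name="bar")
print("{}_{}".format(op.value.name, pluralize(ns.object_name)))   # search_for_bars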
def angToPix(nside, lon, lat, nest=False):
"""
Input (lon, lat) in degrees instead of (theta, phi) in radians
"""
theta = np.radians(90. - lat)
phi = np.radians(lon)
return hp.ang2pix(nside, theta, phi, nest=nest) | Input (lon, lat) in degrees instead of (theta, phi) in radians | Below is the the instruction that describes the task:
### Input:
Input (lon, lat) in degrees instead of (theta, phi) in radians
### Response:
def angToPix(nside, lon, lat, nest=False):
"""
Input (lon, lat) in degrees instead of (theta, phi) in radians
"""
theta = np.radians(90. - lat)
phi = np.radians(lon)
return hp.ang2pix(nside, theta, phi, nest=nest) |
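A short usage sketch for angToPix() above, assuming healpy is imported as hp and numpy as np as in the original module:
# Illustrative only: pixel index for (lon, lat) = (45, 30) degrees at nside=64.
print(angToPix(64, 45.0, 30.0))
import numpy as np
print(angToPix(64, np.array([0.0, 90.0]), np.array([0.0, -30.0]), nest=True))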