def time_limited(limit_seconds, iterable):
"""
Yield items from *iterable* until *limit_seconds* have passed.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = generator()
>>> list(time_limited(0.1, iterable))
[1, 2]
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
"""
if limit_seconds < 0:
raise ValueError('limit_seconds must be positive')
start_time = monotonic()
for item in iterable:
if monotonic() - start_time > limit_seconds:
break
yield item
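A minimal usage sketch; it assumes the function sits in a module that imports monotonic from the time module (as in more-itertools, where a helper with this signature lives), and the generator and the 0.12 s budget are purely illustrative:
from time import monotonic, sleep  # time_limited() itself needs monotonic in scope

def slow_numbers():
    # Each item takes roughly 50 ms to produce.
    for n in range(5):
        sleep(0.05)
        yield n

# Items produced after the 0.12 s budget has elapsed are dropped.
print(list(time_limited(0.12, slow_numbers())))  # e.g. [0, 1]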
def copy_channel(self, channel, owner, to_channel):
'''
Tag all files in channel <channel> also as channel <to_channel>
:param channel: channel to copy
:param owner: Perform this operation on all packages of this user
:param to_channel: Destination name (may be a channel that already exists)
'''
url = '%s/channels/%s/%s/copy/%s' % (self.domain, owner, channel, to_channel)
res = self.session.post(url)
self._check_response(res, [201])
def _diagram_canvas_default(self):
""" Trait initialiser """
canvas = Canvas()
for tool in self.tools:
canvas.tools.append(tool(canvas))
return canvas
def gen_challenge(self, state):
"""returns the next challenge and increments the seed and index
in the state.
:param state: the state to use for generating the challenge. will
verify the integrity of the state object before using it to generate
a challenge. it will then modify the state by incrementing the seed
and index and resign the state for passing back to the server for
storage
"""
state.checksig(self.key)
if (state.index >= state.n):
raise HeartbeatError("Out of challenges.")
state.seed = MerkleHelper.get_next_seed(self.key, state.seed)
chal = Challenge(state.seed, state.index)
state.index += 1
state.sign(self.key)
return chal
def evaluaterforces(Pot,R,z,phi=None,t=0.,v=None):
"""
NAME:
evaluaterforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_r(R,z,phi,t)
HISTORY:
2016-06-10 - Written - Bovy (UofT)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else:
sum+= pot.rforce(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.rforce(R,z,phi=phi,t=t,use_physical=False)
elif isinstance(Pot,DissipativeForce):
return Pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluaterforces' is neither a Potential-instance or a list of such instances") | NAME:
evaluaterforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_r(R,z,phi,t)
HISTORY:
2016-06-10 - Written - Bovy (UofT) | Below is the the instruction that describes the task:
### Input:
NAME:
evaluaterforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_r(R,z,phi,t)
HISTORY:
2016-06-10 - Written - Bovy (UofT)
### Response:
def evaluaterforces(Pot,R,z,phi=None,t=0.,v=None):
"""
NAME:
evaluaterforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_r(R,z,phi,t)
HISTORY:
2016-06-10 - Written - Bovy (UofT)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else:
sum+= pot.rforce(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.rforce(R,z,phi=phi,t=t,use_physical=False)
elif isinstance(Pot,DissipativeForce):
return Pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluaterforces' is neither a Potential-instance or a list of such instances") |
async def seen(self, tick, source=None):
'''
Update the .seen interval and optionally a source specific seen node.
'''
await self.set('.seen', tick)
if source is not None:
seen = await self.snap.addNode('meta:seen', (source, self.ndef))
await seen.set('.seen', tick)
def isbn(self, fmt: Optional[ISBNFormat] = None,
locale: str = 'en') -> str:
"""Generate ISBN for current locale.
To change ISBN format, pass parameter ``fmt`` with needed value of
the enum object :class:`~mimesis.enums.ISBNFormat`
:param fmt: ISBN format.
:param locale: Locale code.
:return: ISBN.
:raises NonEnumerableError: if fmt is not enum ISBNFormat.
"""
fmt_value = self._validate_enum(item=fmt, enum=ISBNFormat)
mask = ISBN_MASKS[fmt_value].format(
ISBN_GROUPS[locale])
return self.random.custom_code(mask)
def create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs):
'''
create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs)
Registers a new process or processes
:Parameters:
* *service* (`string`) -- Service which process will be started
* *agent* (`string`) -- The service identifier (e.g shell_command)
* *title* (`string`) -- Title for the process
* *mode* (`string`) -- production/development
* *service_version* (`string`) -- Version of the service to execute
:Keywords args:
Json value map containing the process input properties
:return: process id
:Example:
.. code-block:: python
process_properties = {"my_input_param" : "1"}
pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service', agent=opereto_client.input['opereto_agent'], **process_properties)
'''
if not agent:
agent = self.input.get('opereto_agent')
if not mode:
mode=self.input.get('opereto_execution_mode') or 'production'
if not service_version:
service_version=self.input.get('opereto_service_version')
request_data = {'service_id': service, 'agents': agent, 'mode': mode, 's_version':service_version}
if title:
request_data['name']=title
if self.input.get('pid'):
request_data['pflow_id']=self.input.get('pid')
request_data.update(**kwargs)
ret_data= self._call_rest_api('post', '/processes', data=request_data, error='Failed to create a new process')
if not isinstance(ret_data, list):
raise OperetoClientError(str(ret_data))
pid = ret_data[0]
message = 'New process created for service [%s] [pid = %s] '%(service, pid)
if agent:
message += ' [agent = %s]'%agent
else:
message += ' [agent = any ]'
self.logger.info(message)
return str(pid)
def _shutdown(self, manual):
"""
Shuts down the TLS session and then shuts down the underlying socket
:param manual:
A boolean if the connection was manually shutdown
"""
if self._ssl is None:
return
while True:
result = libssl.SSL_shutdown(self._ssl)
# Don't be noisy if the socket is already closed
try:
self._raw_write()
except (TLSDisconnectError):
pass
if result >= 0:
break
if result < 0:
error = libssl.SSL_get_error(self._ssl, result)
if error == LibsslConst.SSL_ERROR_WANT_READ:
if self._raw_read() != b'':
continue
else:
break
elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
self._raw_write()
continue
else:
handle_openssl_error(0, TLSError)
if manual:
self._local_closed = True
libssl.SSL_free(self._ssl)
self._ssl = None
# BIOs are freed by SSL_free()
self._rbio = None
self._wbio = None
try:
self._socket.shutdown(socket_.SHUT_RDWR)
except (socket_.error):
pass
def get_providing_power_source_type(self):
"""
Returns GetSystemPowerStatus().ACLineStatus
@raise: WindowsError if any underlying error occurs.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus]
async def start_all_linking(self, linkcode, group, address=None):
"""Start the All-Linking process with the IM and device."""
_LOGGING.info('Starting the All-Linking process')
if address:
linkdevice = self.plm.devices[Address(address).id]
if not linkdevice:
linkdevice = create(self.plm, address, None, None)
_LOGGING.info('Attempting to link the PLM to device %s. ',
address)
self.plm.start_all_linking(linkcode, group)
await asyncio.sleep(.5, loop=self.loop)
linkdevice.enter_linking_mode(group=group)
else:
_LOGGING.info('Starting All-Linking on PLM. '
'Waiting for button press')
self.plm.start_all_linking(linkcode, group)
await asyncio.sleep(self.wait_time, loop=self.loop)
_LOGGING.info('%d devices added to the All-Link Database',
len(self.plm.devices))
await asyncio.sleep(.1, loop=self.loop)
def get_messages(self, domain):
"""
Returns all valid messages after operation.
@type domain: str
@rtype: dict
"""
if domain not in self.domains:
raise ValueError('Invalid domain: {0}'.format(domain))
if domain not in self.messages or 'all' not in self.messages[domain]:
self._process_domain(domain)
return self.messages[domain]['all']
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def use_plenary_asset_composition_view(self):
"""Pass through to provider AssetCompositionSession.use_plenary_asset_composition_view"""
self._object_views['asset_composition'] = PLENARY
# self._get_provider_session('asset_composition_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_asset_composition_view()
except AttributeError:
pass
def check_webhook_secret(app_configs=None, **kwargs):
"""
Check that DJSTRIPE_WEBHOOK_SECRET looks correct
"""
from . import settings as djstripe_settings
messages = []
secret = djstripe_settings.WEBHOOK_SECRET
if secret and not secret.startswith("whsec_"):
messages.append(
checks.Warning(
"DJSTRIPE_WEBHOOK_SECRET does not look valid",
hint="It should start with whsec_...",
id="djstripe.W003",
)
)
return messages
async def _create_proxy_connection(self, req, *args, **kwargs):
"""
args, kwargs can contain different elements (traces, timeout,...)
depending on aiohttp version
"""
if req.proxy.scheme == 'http':
return await super()._create_proxy_connection(req, *args, **kwargs)
else:
return await self._create_socks_connection(req=req)
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
def slaves(self):
'''The list of slave managers of this manager, if any.
This information can also be found by listing the children of this node
that are of type @ref Manager.
'''
with self._mutex:
if not self._slaves:
self._slaves = [c for c in self.children if c.is_manager]
return self._slaves
def map_gate(gate: Gate, args: Sequence[Qubits]) -> Circuit:
"""Applies the same gate all input qubits in the argument list.
>>> circ = qf.map_gate(qf.H(), [[0], [1], [2]])
>>> print(circ)
H(0)
H(1)
H(2)
"""
circ = Circuit()
for qubits in args:
circ += gate.relabel(qubits)
return circ
def gapfill(model, universal=None, lower_bound=0.05,
penalties=None, demand_reactions=True, exchange_reactions=False,
iterations=1):
"""Perform gapfilling on a model.
See documentation for the class GapFiller.
Parameters
----------
model : cobra.Model
The model to perform gap filling on.
universal : cobra.Model, None
A universal model with reactions that can be used to complete the
model. If left missing, gapfilling only considers demand and exchange
reactions.
lower_bound : float
The minimally accepted flux for the objective in the filled model.
penalties : dict, None
A dictionary with keys being 'universal' (all reactions included in
the universal model), 'exchange' and 'demand' (all additionally
added exchange and demand reactions) for the three reaction types.
Can also have reaction identifiers for reaction specific costs.
Defaults are 1, 100 and 1 respectively.
iterations : int
The number of rounds of gapfilling to perform. For every iteration,
the penalty for every used reaction increases linearly. This way,
the algorithm is encouraged to search for alternative solutions
which may include previously used reactions. I.e., with enough
iterations pathways including 10 steps will eventually be reported
even if the shortest pathway is a single reaction.
exchange_reactions : bool
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool
Consider adding demand reactions for all metabolites.
Returns
-------
iterable
list of lists with one set of reactions that completes the model per
requested iteration.
Examples
--------
>>> import cobra.test as ct
>>> from cobra import Model
>>> from cobra.flux_analysis import gapfill
>>> model = ct.create_test_model("salmonella")
>>> universal = Model('universal')
>>> universal.add_reactions(model.reactions.GF6PTA.copy())
>>> model.remove_reactions([model.reactions.GF6PTA])
>>> gapfill(model, universal)
"""
gapfiller = GapFiller(model, universal=universal,
lower_bound=lower_bound, penalties=penalties,
demand_reactions=demand_reactions,
exchange_reactions=exchange_reactions)
return gapfiller.fill(iterations=iterations)
def link(self, source, target):
'creates a hard link `target -> source` (e.g. ln source target)'
return self.operations('link', target.decode(self.encoding),
source.decode(self.encoding))
def run_edisgo_pool(ding0_file_list, run_args_opt,
workers=mp.cpu_count(), worker_lifetime=1):
"""
Use python multiprocessing toolbox for parallelization
Several grids are analyzed in parallel.
Parameters
----------
ding0_file_list : list
Ding0 grid data file names
run_args_opt : list
eDisGo options, see :func:`run_edisgo_basic` and
:func:`run_edisgo_twice`
workers: int
Number of parallel process
worker_lifetime : int
Bunch of grids sequentially analyzed by a worker
Returns
-------
all_costs_before_geno_import : list
Grid extension cost before grid connection of new generators
all_grid_issues_before_geno_import : list
Remaining overloading or over-voltage issues in grid
all_costs : list
Grid extension cost due to grid connection of new generators
all_grid_issues : list
Remaining overloading or over-voltage issues in grid
"""
def collect_pool_results(result):
results.append(result)
results = []
pool = mp.Pool(workers,
maxtasksperchild=worker_lifetime)
for file in ding0_file_list:
edisgo_args = [file] + run_args_opt
pool.apply_async(func=run_edisgo_twice,
args=(edisgo_args,),
callback=collect_pool_results)
pool.close()
pool.join()
# process results data
all_costs_before_geno_import = [r[0] for r in results]
all_grid_issues_before_geno_import = [r[1] for r in results]
all_costs = [r[2] for r in results]
all_grid_issues = [r[3] for r in results]
return all_costs_before_geno_import, all_grid_issues_before_geno_import, \
all_costs, all_grid_issues
def postinit(self, expr=None, globals=None, locals=None):
"""Do some setup after initialisation.
:param expr: The expression to be executed.
:type expr: NodeNG or None
:param globals:The globals dictionary to execute with.
:type globals: NodeNG or None
:param locals: The locals dictionary to execute with.
:type locals: NodeNG or None
"""
self.expr = expr
self.globals = globals
self.locals = locals
def push_tx(self, crypto, tx_hex):
"""
This method is untested.
"""
url = "%s/pushtx" % self.base_url
return self.post_url(url, {'hex': tx_hex}).content
def calculate_input(self, buffer):
"""
Calculate how many keystrokes were used in triggering this phrase.
"""
# TODO: This function is unused?
if TriggerMode.ABBREVIATION in self.modes:
if self._should_trigger_abbreviation(buffer):
if self.immediate:
return len(self._get_trigger_abbreviation(buffer))
else:
return len(self._get_trigger_abbreviation(buffer)) + 1
# TODO - re-enable me if restoring predictive functionality
#if TriggerMode.PREDICTIVE in self.modes:
# if self._should_trigger_predictive(buffer):
# return ConfigManager.SETTINGS[PREDICTIVE_LENGTH]
if TriggerMode.HOTKEY in self.modes:
if buffer == '':
return len(self.modifiers) + 1
return self.parent.calculate_input(buffer)
def quota(ip=None):
"""Check your quota."""
# TODO: Add arbitrary user defined IP check
url = 'http://www.random.org/quota/?format=plain'
data = urlopen(url)
credit = int(data.read().strip())
if data.code == 200:
return credit
else:
return "ERROR: Server responded with code %s" % data.code | Check your quota. | Below is the the instruction that describes the task:
### Input:
Check your quota.
### Response:
def quota(ip=None):
"""Check your quota."""
# TODO: Add arbitrary user defined IP check
url = 'http://www.random.org/quota/?format=plain'
data = urlopen(url)
credit = int(data.read().strip())
if data.code == 200:
return credit
else:
return "ERROR: Server responded with code %s" % data.code |
def get_long_description():
"""Grok the readme, turn it into whine (rst)."""
root_path = get_root_path()
readme_path = os.path.join(root_path, "README.md")
try:
import pypandoc
return pypandoc.convert(readme_path, "rst").strip()
except ImportError:
return "Cloudsmith CLI" | Grok the readme, turn it into whine (rst). | Below is the the instruction that describes the task:
### Input:
Grok the readme, turn it into whine (rst).
### Response:
def get_long_description():
"""Grok the readme, turn it into whine (rst)."""
root_path = get_root_path()
readme_path = os.path.join(root_path, "README.md")
try:
import pypandoc
return pypandoc.convert(readme_path, "rst").strip()
except ImportError:
return "Cloudsmith CLI" |
def HsvToRgb(h, s, v):
'''Convert the color from HSV coordinates to RGB.
Parameters:
:h:
The Hue component value [0...360]
:s:
The Saturation component value [0...1]
:v:
The Value component [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HsvToRgb(30.0, 1.0, 0.5)
(0.5, 0.25, 0.0)
'''
if s==0: return (v, v, v) # achromatic (gray)
h /= 60.0
h = h % 6.0
i = int(h)
f = h - i
if not(i&1): f = 1-f # if i is even
m = v * (1.0 - s)
n = v * (1.0 - (s * f))
if i==0: return (v, n, m)
if i==1: return (n, v, m)
if i==2: return (m, v, n)
if i==3: return (m, n, v)
if i==4: return (n, m, v)
return (v, m, n)
def _setup_stats(self):
'''
Sets up the stats collection
'''
self.stats_dict = {}
redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'],
db=self.settings.get('REDIS_DB'))
try:
redis_conn.info()
self.logger.debug("Connected to Redis in StatsCollector Setup")
self.redis_conn = redis_conn
except ConnectionError:
self.logger.warn("Failed to connect to Redis in StatsCollector"
" Setup, no stats will be collected")
return
if self.settings['STATS_TOTAL']:
self._setup_stats_total(redis_conn)
if self.settings['STATS_PLUGINS']:
self._setup_stats_plugins(redis_conn)
def check_namespace(namespace_id):
"""
Verify that a namespace ID is well-formed
>>> check_namespace(123)
False
>>> check_namespace(None)
False
>>> check_namespace('')
False
>>> check_namespace('abcd')
True
>>> check_namespace('Abcd')
False
>>> check_namespace('a+bcd')
False
>>> check_namespace('.abcd')
False
>>> check_namespace('abcdabcdabcdabcdabcd')
False
>>> check_namespace('abcdabcdabcdabcdabc')
True
"""
if type(namespace_id) not in [str, unicode]:
return False
if not is_namespace_valid(namespace_id):
return False
return True
def builds(self):
"""Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.BuildsOperations>`
"""
api_version = self._get_api_version('builds')
if api_version == '2018-02-01-preview':
from .v2018_02_01_preview.operations import BuildsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.BuildsOperations>` | Below is the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.BuildsOperations>`
### Response:
def builds(self):
"""Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.BuildsOperations>`
"""
api_version = self._get_api_version('builds')
if api_version == '2018-02-01-preview':
from .v2018_02_01_preview.operations import BuildsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
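To show the dispatch idea in isolation, the following stand-alone sketch mimics the version check above; the function name and return value are made up and are not part of the real client.

# Illustrative stand-in for the API-version dispatch performed by the property above.
def resolve_builds_operations(api_version):
    if api_version == '2018-02-01-preview':
        return 'BuildsOperations (v2018_02_01_preview)'
    raise NotImplementedError("APIVersion {} is not available".format(api_version))

print(resolve_builds_operations('2018-02-01-preview'))  # supported version resolves
# Any other version string raises NotImplementedError, mirroring the code above.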
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
"""
Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
"""
if mapping is not None and not hasattr(mapping, 'get') and \
not callable(mapping):
raise TypeError("mapping from old to new argument values "
"must be dict or callable!")
def _deprecate_kwarg(func):
@wraps(func)
def wrapper(*args, **kwargs):
old_arg_value = kwargs.pop(old_arg_name, None)
if new_arg_name is None and old_arg_value is not None:
msg = (
"the '{old_name}' keyword is deprecated and will be "
"removed in a future version. "
"Please take steps to stop the use of '{old_name}'"
).format(old_name=old_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
kwargs[old_arg_name] = old_arg_value
return func(*args, **kwargs)
if old_arg_value is not None:
if mapping is not None:
if hasattr(mapping, 'get'):
new_arg_value = mapping.get(old_arg_value,
old_arg_value)
else:
new_arg_value = mapping(old_arg_value)
msg = ("the {old_name}={old_val!r} keyword is deprecated, "
"use {new_name}={new_val!r} instead"
).format(old_name=old_arg_name,
old_val=old_arg_value,
new_name=new_arg_name,
new_val=new_arg_value)
else:
new_arg_value = old_arg_value
msg = ("the '{old_name}' keyword is deprecated, "
"use '{new_name}' instead"
).format(old_name=old_arg_name,
new_name=new_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name, None) is not None:
msg = ("Can only specify '{old_name}' or '{new_name}', "
"not both").format(old_name=old_arg_name,
new_name=new_arg_name)
raise TypeError(msg)
else:
kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
return wrapper
return _deprecate_kwarg | Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning | Below is the instruction that describes the task:
### Input:
Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
### Response:
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
"""
Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
"""
if mapping is not None and not hasattr(mapping, 'get') and \
not callable(mapping):
raise TypeError("mapping from old to new argument values "
"must be dict or callable!")
def _deprecate_kwarg(func):
@wraps(func)
def wrapper(*args, **kwargs):
old_arg_value = kwargs.pop(old_arg_name, None)
if new_arg_name is None and old_arg_value is not None:
msg = (
"the '{old_name}' keyword is deprecated and will be "
"removed in a future version. "
"Please take steps to stop the use of '{old_name}'"
).format(old_name=old_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
kwargs[old_arg_name] = old_arg_value
return func(*args, **kwargs)
if old_arg_value is not None:
if mapping is not None:
if hasattr(mapping, 'get'):
new_arg_value = mapping.get(old_arg_value,
old_arg_value)
else:
new_arg_value = mapping(old_arg_value)
msg = ("the {old_name}={old_val!r} keyword is deprecated, "
"use {new_name}={new_val!r} instead"
).format(old_name=old_arg_name,
old_val=old_arg_value,
new_name=new_arg_name,
new_val=new_arg_value)
else:
new_arg_value = old_arg_value
msg = ("the '{old_name}' keyword is deprecated, "
"use '{new_name}' instead"
).format(old_name=old_arg_name,
new_name=new_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name, None) is not None:
msg = ("Can only specify '{old_name}' or '{new_name}', "
"not both").format(old_name=old_arg_name,
new_name=new_arg_name)
raise TypeError(msg)
else:
kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
return wrapper
return _deprecate_kwarg |
def set_power_state(self, desired_state):
"""Set power state of this node
:param node: Ironic node one of :class:`ironic.db.models.Node`
:raises: InvalidParameterValue if required seamicro parameters are
missing.
:raises: UcsOperationError on an error from UcsHandle Client.
:returns: Power state of the given node
"""
rn_array = [self.helper.service_profile,
ManagedObject(NamingId.LS_POWER).MakeRn()]
try:
ls_power = ucs_helper.get_managed_object(self.helper.handle,
LsPower.ClassId(),
{LsPower.DN: UcsUtils.MakeDn(rn_array)})
if not ls_power:
raise exception.UcsOperationError("set_power_state",
"Failed to get power MO,"
" configure valid service-profile.")
else:
ls_power_set = self.helper.handle.SetManagedObject(
ls_power,
LsPower.ClassId(),
{LsPower.STATE: desired_state},
dumpXml=YesOrNo.TRUE
)
if ls_power_set:
power = ls_power_set.pop()
return power.getattr(LsPower.STATE)
else:
return states.ERROR
except Exception as ex:
raise exception.UcsOperationError("set_power_state",
"Failed to get power MO,"
"configure valid servie-profile.") | Set power state of this node
:param node: Ironic node one of :class:`ironic.db.models.Node`
:raises: InvalidParameterValue if required seamicro parameters are
missing.
:raises: UcsOperationError on an error from UcsHandle Client.
:returns: Power state of the given node | Below is the instruction that describes the task:
### Input:
Set power state of this node
:param node: Ironic node one of :class:`ironic.db.models.Node`
:raises: InvalidParameterValue if required seamicro parameters are
missing.
:raises: UcsOperationError on an error from UcsHandle Client.
:returns: Power state of the given node
### Response:
def set_power_state(self, desired_state):
"""Set power state of this node
:param node: Ironic node one of :class:`ironic.db.models.Node`
:raises: InvalidParameterValue if required seamicro parameters are
missing.
:raises: UcsOperationError on an error from UcsHandle Client.
:returns: Power state of the given node
"""
rn_array = [self.helper.service_profile,
ManagedObject(NamingId.LS_POWER).MakeRn()]
try:
ls_power = ucs_helper.get_managed_object(self.helper.handle,
LsPower.ClassId(),
{LsPower.DN: UcsUtils.MakeDn(rn_array)})
if not ls_power:
raise exception.UcsOperationError("set_power_state",
"Failed to get power MO,"
" configure valid service-profile.")
else:
ls_power_set = self.helper.handle.SetManagedObject(
ls_power,
LsPower.ClassId(),
{LsPower.STATE: desired_state},
dumpXml=YesOrNo.TRUE
)
if ls_power_set:
power = ls_power_set.pop()
return power.getattr(LsPower.STATE)
else:
return states.ERROR
except Exception as ex:
raise exception.UcsOperationError("set_power_state",
"Failed to get power MO,"
"configure valid servie-profile.") |
def logs_update(self):
"""
Function updates logs.
"""
Gdk.threads_enter()
if not self.debugging:
self.debugging = True
self.debug_btn.set_label('Info logs')
else:
self.debugging = False
self.debug_btn.set_label('Debug logs')
for record in self.debug_logs['logs']:
if self.debugging:
# Create a new root tree element
if getattr(record, 'event_type', '') != "cmd_retcode":
self.store.append([format_entry(record, show_level=True, colorize=True)])
else:
if int(record.levelno) > 10:
self.store.append([format_entry(record, colorize=True)])
Gdk.threads_leave() | Function updates logs. | Below is the instruction that describes the task:
### Input:
Function updates logs.
### Response:
def logs_update(self):
"""
Function updates logs.
"""
Gdk.threads_enter()
if not self.debugging:
self.debugging = True
self.debug_btn.set_label('Info logs')
else:
self.debugging = False
self.debug_btn.set_label('Debug logs')
for record in self.debug_logs['logs']:
if self.debugging:
# Create a new root tree element
if getattr(record, 'event_type', '') != "cmd_retcode":
self.store.append([format_entry(record, show_level=True, colorize=True)])
else:
if int(record.levelno) > 10:
self.store.append([format_entry(record, colorize=True)])
Gdk.threads_leave() |
def check_positive(value, strict=False):
"""
Checks if variable is positive
@param value: value to check
@type value: C{integer types}, C{float} or C{Decimal}
@return: None when check successful
@raise ValueError: check failed
"""
if not strict and value < 0:
raise ValueError("Value must be positive or zero, not %s" % str(value))
if strict and value <= 0:
raise ValueError("Value must be positive, not %s" % str(value)) | Checks if variable is positive
@param value: value to check
@type value: C{integer types}, C{float} or C{Decimal}
@return: None when check successful
@raise ValueError: check failed | Below is the instruction that describes the task:
### Input:
Checks if variable is positive
@param value: value to check
@type value: C{integer types}, C{float} or C{Decimal}
@return: None when check successful
@raise ValueError: check failed
### Response:
def check_positive(value, strict=False):
"""
Checks if variable is positive
@param value: value to check
@type value: C{integer types}, C{float} or C{Decimal}
@return: None when check successful
@raise ValueError: check failed
"""
if not strict and value < 0:
raise ValueError("Value must be positive or zero, not %s" % str(value))
if strict and value <= 0:
raise ValueError("Value must be positive, not %s" % str(value)) |
def dag_state(args):
"""
Returns the state of a DagRun at the command line.
>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
running
"""
dag = get_dag(args)
dr = DagRun.find(dag.dag_id, execution_date=args.execution_date)
print(dr[0].state if len(dr) > 0 else None) | Returns the state of a DagRun at the command line.
>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
running | Below is the instruction that describes the task:
### Input:
Returns the state of a DagRun at the command line.
>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
running
### Response:
def dag_state(args):
"""
Returns the state of a DagRun at the command line.
>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
running
"""
dag = get_dag(args)
dr = DagRun.find(dag.dag_id, execution_date=args.execution_date)
print(dr[0].state if len(dr) > 0 else None) |
def calculate(self, T, P, zs, ws, method):
r'''Method to calculate surface tension of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
sigma : float
Surface tension of the liquid at given conditions, [N/m]
'''
if method == SIMPLE:
sigmas = [i(T) for i in self.SurfaceTensions]
return mixing_simple(zs, sigmas)
elif method == DIGUILIOTEJA:
return Diguilio_Teja(T=T, xs=zs, sigmas_Tb=self.sigmas_Tb,
Tbs=self.Tbs, Tcs=self.Tcs)
elif method == WINTERFELDSCRIVENDAVIS:
sigmas = [i(T) for i in self.SurfaceTensions]
rhoms = [1./i(T, P) for i in self.VolumeLiquids]
return Winterfeld_Scriven_Davis(zs, sigmas, rhoms)
else:
raise Exception('Method not valid') | r'''Method to calculate surface tension of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
sigma : float
Surface tension of the liquid at given conditions, [N/m] | Below is the instruction that describes the task:
### Input:
r'''Method to calculate surface tension of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
sigma : float
Surface tension of the liquid at given conditions, [N/m]
### Response:
def calculate(self, T, P, zs, ws, method):
r'''Method to calculate surface tension of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
sigma : float
Surface tension of the liquid at given conditions, [N/m]
'''
if method == SIMPLE:
sigmas = [i(T) for i in self.SurfaceTensions]
return mixing_simple(zs, sigmas)
elif method == DIGUILIOTEJA:
return Diguilio_Teja(T=T, xs=zs, sigmas_Tb=self.sigmas_Tb,
Tbs=self.Tbs, Tcs=self.Tcs)
elif method == WINTERFELDSCRIVENDAVIS:
sigmas = [i(T) for i in self.SurfaceTensions]
rhoms = [1./i(T, P) for i in self.VolumeLiquids]
return Winterfeld_Scriven_Davis(zs, sigmas, rhoms)
else:
raise Exception('Method not valid') |
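To make the SIMPLE branch concrete, the sketch below performs the mole-fraction-weighted average that branch reduces to; the numbers are made up and do not come from any property data bank.

# Illustrative only: linear mixing of pure-component surface tensions.
zs = [0.4, 0.6]              # mole fractions (assumed)
sigmas = [0.0217, 0.0588]    # pure-component surface tensions in N/m (assumed)
sigma_mix = sum(z * s for z, s in zip(zs, sigmas))
print(round(sigma_mix, 5))   # -> 0.04396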
def _update_dprx(self):
"""Update `dprx`."""
if 'beta' in self.freeparams:
for r in range(self.nsites):
self.dprx['beta'][r] = self.prx[r] * (self.ln_pi_codon[r]
- scipy.dot(self.ln_pi_codon[r], self.prx[r]))
if 'eta' in self.freeparams:
boolterm = scipy.ndarray(N_CODON, dtype='float')
with scipy.errstate(divide='raise', under='raise', over='raise',
invalid='raise'):
for i in range(N_NT - 1):
boolterm.fill(0)
for j in range(3):
boolterm += ((i <= CODON_NT_INDEX[j]).astype('float') /
(self.eta[i] - (i == CODON_NT_INDEX[j]).astype(
'float')))
for r in range(self.nsites):
self.dprx['eta'][i][r] = self.prx[r] * (boolterm -
scipy.dot(boolterm, self.prx[r]) / self.prx[r].sum()) | Update `dprx`. | Below is the instruction that describes the task:
### Input:
Update `dprx`.
### Response:
def _update_dprx(self):
"""Update `dprx`."""
if 'beta' in self.freeparams:
for r in range(self.nsites):
self.dprx['beta'][r] = self.prx[r] * (self.ln_pi_codon[r]
- scipy.dot(self.ln_pi_codon[r], self.prx[r]))
if 'eta' in self.freeparams:
boolterm = scipy.ndarray(N_CODON, dtype='float')
with scipy.errstate(divide='raise', under='raise', over='raise',
invalid='raise'):
for i in range(N_NT - 1):
boolterm.fill(0)
for j in range(3):
boolterm += ((i <= CODON_NT_INDEX[j]).astype('float') /
(self.eta[i] - (i == CODON_NT_INDEX[j]).astype(
'float')))
for r in range(self.nsites):
self.dprx['eta'][i][r] = self.prx[r] * (boolterm -
scipy.dot(boolterm, self.prx[r]) / self.prx[r].sum()) |
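As a self-contained check of the 'beta' branch above, the sketch below repeats its arithmetic on made-up three-element vectors (the real model works on codon-length arrays via scipy):

import numpy
# Illustrative only: prx must be normalized; values are assumed.
prx = numpy.array([0.2, 0.3, 0.5])                 # stationary probabilities (assumed)
ln_pi = numpy.log(numpy.array([0.1, 0.3, 0.6]))    # log preferences (assumed)
dprx_beta = prx * (ln_pi - numpy.dot(ln_pi, prx))
print(dprx_beta)    # entries sum to ~0 because prx sums to 1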
def __select (self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise | This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). | Below is the instruction that describes the task:
### Input:
This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize).
### Response:
def __select (self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise |
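The retry-on-EINTR idea can be exercised outside the class; the sketch below is a simplified stand-alone variant of the loop above (the function name is made up), written against the same select.error and errno.EINTR convention as the surrounding code. On Python 3.5+ the interpreter itself retries interrupted select() calls (PEP 475), so the explicit loop mainly matters on older interpreters.

import errno
import select
import time

def select_ignoring_eintr(rlist, wlist, xlist, timeout=None):
    # Simplified stand-alone version of the retry loop above.
    if timeout is not None:
        end_time = time.time() + timeout
    while True:
        try:
            return select.select(rlist, wlist, xlist, timeout)
        except select.error as e:
            if e.args[0] != errno.EINTR:
                raise                      # a real error, not an interrupted call
            if timeout is not None:
                timeout = end_time - time.time()
                if timeout < 0:
                    return ([], [], [])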
def dimension(self, name, copy=True):
"""
Returns the requested :class:`~hypercube.dims.Dimension` object
Parameters
----------
name : str
Name of the :class:`~hypercube.dims.Dimension` object
copy : boolean
Returns a copy of the :class:`~hypercube.dims.Dimension` object if True (Default value = True)
Returns
-------
:class:`~hypercube.dims.Dimension`
A :class:`~hypercube.dims.Dimension` object.
"""
try:
return create_dimension(name, self._dims[name]) if copy else self._dims[name]
except KeyError:
raise KeyError("Dimension '{n}' is not registered "
"on this cube".format(n=name)), None, sys.exc_info()[2] | Returns the requested :class:`~hypercube.dims.Dimension` object
Parameters
----------
name : str
Name of the :class:`~hypercube.dims.Dimension` object
copy : boolean
Returns a copy of the :class:`~hypercube.dims.Dimension` object if True (Default value = True)
Returns
-------
:class:`~hypercube.dims.Dimension`
A :class:`~hypercube.dims.Dimension` object. | Below is the instruction that describes the task:
### Input:
Returns the requested :class:`~hypercube.dims.Dimension` object
Parameters
----------
name : str
Name of the :class:`~hypercube.dims.Dimension` object
copy : boolean
Returns a copy of the :class:`~hypercube.dims.Dimension` object if True (Default value = True)
Returns
-------
:class:`~hypercube.dims.Dimension`
A :class:`~hypercube.dims.Dimension` object.
### Response:
def dimension(self, name, copy=True):
"""
Returns the requested :class:`~hypercube.dims.Dimension` object
Parameters
----------
name : str
Name of the :class:`~hypercube.dims.Dimension` object
copy : boolean
Returns a copy of the :class:`~hypercube.dims.Dimension` object if True (Default value = True)
Returns
-------
:class:`~hypercube.dims.Dimension`
A :class:`~hypercube.dims.Dimension` object.
"""
try:
return create_dimension(name, self._dims[name]) if copy else self._dims[name]
except KeyError:
raise KeyError("Dimension '{n}' is not registered "
"on this cube".format(n=name)), None, sys.exc_info()[2] |
def cause_info(self, mechanism, purview):
"""Return the cause information for a mechanism over a purview."""
return repertoire_distance(
Direction.CAUSE,
self.cause_repertoire(mechanism, purview),
self.unconstrained_cause_repertoire(purview)
) | Return the cause information for a mechanism over a purview. | Below is the instruction that describes the task:
### Input:
Return the cause information for a mechanism over a purview.
### Response:
def cause_info(self, mechanism, purview):
"""Return the cause information for a mechanism over a purview."""
return repertoire_distance(
Direction.CAUSE,
self.cause_repertoire(mechanism, purview),
self.unconstrained_cause_repertoire(purview)
) |
def calc_toa_gain_offset(meta):
"""
Compute (gain, offset) tuples for each band of the specified image metadata
"""
# Set satellite index to look up cal factors
sat_index = meta['satid'].upper() + "_" + meta['bandid'].upper()
# Set scale for at sensor radiance
# Eq is:
# L = GAIN * DN * (ACF/EBW) + Offset
# ACF abscal factor from meta data
# EBW effectiveBandwidth from meta data
# Gain provided by abscal from const
# Offset provided by abscal from const
acf = np.asarray(meta['abscalfactor']) # Should be nbands length
ebw = np.asarray(meta['effbandwidth']) # Should be nbands length
gain = np.asarray(constants.DG_ABSCAL_GAIN[sat_index])
scale = (acf / ebw) * gain
offset = np.asarray(constants.DG_ABSCAL_OFFSET[sat_index])
e_sun_index = meta['satid'].upper() + "_" + meta['bandid'].upper()
e_sun = np.asarray(constants.DG_ESUN[e_sun_index])
sun = ephem.Sun()
img_obs = ephem.Observer()
img_obs.lon = meta['latlonhae'][1]
img_obs.lat = meta['latlonhae'][0]
img_obs.elevation = meta['latlonhae'][2]
img_obs.date = datetime.datetime.fromtimestamp(meta['img_datetime_obj_utc']['$date'] / 1000.0).strftime(
'%Y-%m-%d %H:%M:%S.%f')
sun.compute(img_obs)
d_es = sun.earth_distance
# Pull sun elevation from the image metadata
# theta_s can be zenith or elevation - the calc below will us either
# a cos or s in respectively
# theta_s = float(self.meta_dg.IMD.IMAGE.MEANSUNEL)
theta_s = 90 - float(meta['mean_sun_el'])
scale2 = (d_es ** 2 * np.pi) / (e_sun * np.cos(np.deg2rad(theta_s)))
# Return scaled data
# Radiance = Scale * Image + offset, Reflectance = Radiance * Scale2
return zip(scale, scale2, offset) | Compute (gain, offset) tuples for each band of the specified image metadata | Below is the instruction that describes the task:
### Input:
Compute (gain, offset) tuples for each band of the specified image metadata
### Response:
def calc_toa_gain_offset(meta):
"""
Compute (gain, offset) tuples for each band of the specified image metadata
"""
# Set satellite index to look up cal factors
sat_index = meta['satid'].upper() + "_" + meta['bandid'].upper()
# Set scale for at sensor radiance
# Eq is:
# L = GAIN * DN * (ACF/EBW) + Offset
# ACF abscal factor from meta data
# EBW effectiveBandwidth from meta data
# Gain provided by abscal from const
# Offset provided by abscal from const
acf = np.asarray(meta['abscalfactor']) # Should be nbands length
ebw = np.asarray(meta['effbandwidth']) # Should be nbands length
gain = np.asarray(constants.DG_ABSCAL_GAIN[sat_index])
scale = (acf / ebw) * gain
offset = np.asarray(constants.DG_ABSCAL_OFFSET[sat_index])
e_sun_index = meta['satid'].upper() + "_" + meta['bandid'].upper()
e_sun = np.asarray(constants.DG_ESUN[e_sun_index])
sun = ephem.Sun()
img_obs = ephem.Observer()
img_obs.lon = meta['latlonhae'][1]
img_obs.lat = meta['latlonhae'][0]
img_obs.elevation = meta['latlonhae'][2]
img_obs.date = datetime.datetime.fromtimestamp(meta['img_datetime_obj_utc']['$date'] / 1000.0).strftime(
'%Y-%m-%d %H:%M:%S.%f')
sun.compute(img_obs)
d_es = sun.earth_distance
# Pull sun elevation from the image metadata
# theta_s can be zenith or elevation - the calc below will use either
# a cos or a sin respectively
# theta_s = float(self.meta_dg.IMD.IMAGE.MEANSUNEL)
theta_s = 90 - float(meta['mean_sun_el'])
scale2 = (d_es ** 2 * np.pi) / (e_sun * np.cos(np.deg2rad(theta_s)))
# Return scaled data
# Radiance = Scale * Image + offset, Reflectance = Radiance * Scale2
return zip(scale, scale2, offset) |
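Applying the returned tuples follows the comment above (Radiance = scale * DN + offset, Reflectance = Radiance * scale2); the sketch below runs that arithmetic on made-up numbers for a single band.

# Illustrative only: made-up per-band calibration values and a raw DN.
scale, scale2, offset = 0.01, 0.002, 1.5   # one (gain, reflectance scale, offset) tuple
dn = 800.0                                 # raw digital number for one pixel (assumed)
radiance = scale * dn + offset             # at-sensor radiance
reflectance = radiance * scale2            # top-of-atmosphere reflectance
print(radiance, reflectance)               # -> 9.5 0.019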
def create_system(self, new_machine_id=False):
"""
Create the machine via the API
"""
client_hostname = determine_hostname()
machine_id = generate_machine_id(new_machine_id)
branch_info = self.branch_info
if not branch_info:
return False
remote_branch = branch_info['remote_branch']
remote_leaf = branch_info['remote_leaf']
data = {'machine_id': machine_id,
'remote_branch': remote_branch,
'remote_leaf': remote_leaf,
'hostname': client_hostname}
if self.config.display_name is not None:
data['display_name'] = self.config.display_name
data = json.dumps(data)
post_system_url = self.api_url + '/v1/systems'
logger.debug("POST System: %s", post_system_url)
logger.debug(data)
net_logger.info("POST %s", post_system_url)
return self.session.post(post_system_url,
headers={'Content-Type': 'application/json'},
data=data) | Create the machine via the API | Below is the instruction that describes the task:
### Input:
Create the machine via the API
### Response:
def create_system(self, new_machine_id=False):
"""
Create the machine via the API
"""
client_hostname = determine_hostname()
machine_id = generate_machine_id(new_machine_id)
branch_info = self.branch_info
if not branch_info:
return False
remote_branch = branch_info['remote_branch']
remote_leaf = branch_info['remote_leaf']
data = {'machine_id': machine_id,
'remote_branch': remote_branch,
'remote_leaf': remote_leaf,
'hostname': client_hostname}
if self.config.display_name is not None:
data['display_name'] = self.config.display_name
data = json.dumps(data)
post_system_url = self.api_url + '/v1/systems'
logger.debug("POST System: %s", post_system_url)
logger.debug(data)
net_logger.info("POST %s", post_system_url)
return self.session.post(post_system_url,
headers={'Content-Type': 'application/json'},
data=data) |
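For reference, a sketch of the JSON document this method POSTs; the key names come from the code above, and every value is a made-up placeholder.

import json
# Illustrative only: shape of the registration payload built by create_system.
payload = {
    'machine_id': '00000000-0000-0000-0000-000000000000',   # placeholder
    'remote_branch': 'example-branch',                       # placeholder
    'remote_leaf': 'example-leaf',                           # placeholder
    'hostname': 'host.example.com',                          # placeholder
    'display_name': 'Example host',                          # only sent when configured
}
print(json.dumps(payload))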
def officers(self, num, **kwargs):
"""Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = self._BASE_URI + "company/{}/officers".format(num)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword. | Below is the instruction that describes the task:
### Input:
Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
### Response:
def officers(self, num, **kwargs):
"""Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = self._BASE_URI + "company/{}/officers".format(num)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res |
def line(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...)
"""
fig = options.get('_fig', False) or _create_fig(options=options)
[line] = (_create_marks(fig=fig, marks=[bq.Lines], options=options))
_add_marks(fig, [line])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
line.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
line.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...) | Below is the instruction that describes the task:
### Input:
Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...)
### Response:
def line(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...)
"""
fig = options.get('_fig', False) or _create_fig(options=options)
[line] = (_create_marks(fig=fig, marks=[bq.Lines], options=options))
_add_marks(fig, [line])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
line.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
line.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) |
def serialize(self, serializable: Optional[Union[SerializableType, List[SerializableType]]]) \
-> PrimitiveJsonType:
"""
Serializes the given serializable object or collection of serializable objects.
:param serializable: the object or objects to serialize
:return: a serialization of the given object
"""
if serializable is None:
# Implements #17
return None
elif isinstance(serializable, List):
return [self.serialize(item) for item in serializable]
else:
serialized = self._create_serialized_container()
for mapping in self._property_mappings:
if mapping.object_property_getter is not None and mapping.serialized_property_setter is not None:
value = mapping.object_property_getter(serializable)
if not (mapping.optional and value is None):
if isinstance(value, type(mapping.collection_factory([]))):
value = list(mapping.collection_iter(value))
encoded_value = self._serialize_property_value(value, mapping.serializer_cls)
mapping.serialized_property_setter(serialized, encoded_value)
return serialized | Serializes the given serializable object or collection of serializable objects.
:param serializable: the object or objects to serialize
:return: a serialization of the given object | Below is the instruction that describes the task:
### Input:
Serializes the given serializable object or collection of serializable objects.
:param serializable: the object or objects to serialize
:return: a serialization of the given object
### Response:
def serialize(self, serializable: Optional[Union[SerializableType, List[SerializableType]]]) \
-> PrimitiveJsonType:
"""
Serializes the given serializable object or collection of serializable objects.
:param serializable: the object or objects to serialize
:return: a serialization of the given object
"""
if serializable is None:
# Implements #17
return None
elif isinstance(serializable, List):
return [self.serialize(item) for item in serializable]
else:
serialized = self._create_serialized_container()
for mapping in self._property_mappings:
if mapping.object_property_getter is not None and mapping.serialized_property_setter is not None:
value = mapping.object_property_getter(serializable)
if not (mapping.optional and value is None):
if isinstance(value, type(mapping.collection_factory([]))):
value = list(mapping.collection_iter(value))
encoded_value = self._serialize_property_value(value, mapping.serializer_cls)
mapping.serialized_property_setter(serialized, encoded_value)
return serialized |
def update(self):
"""Update object properties."""
current_time = int(time.time())
last_refresh = 0 if self._last_refresh is None else self._last_refresh
if current_time >= (last_refresh + self._refresh_rate):
self.get_cameras_properties()
self.get_ambient_sensor_data()
self.get_camera_extended_properties()
self._attrs = self._session.refresh_attributes(self.name)
self._attrs = assert_is_dict(self._attrs)
_LOGGER.debug("Called base station update of camera properties: "
"Scan Interval: %s, New Properties: %s",
self._refresh_rate, self.camera_properties) | Update object properties. | Below is the instruction that describes the task:
### Input:
Update object properties.
### Response:
def update(self):
"""Update object properties."""
current_time = int(time.time())
last_refresh = 0 if self._last_refresh is None else self._last_refresh
if current_time >= (last_refresh + self._refresh_rate):
self.get_cameras_properties()
self.get_ambient_sensor_data()
self.get_camera_extended_properties()
self._attrs = self._session.refresh_attributes(self.name)
self._attrs = assert_is_dict(self._attrs)
_LOGGER.debug("Called base station update of camera properties: "
"Scan Interval: %s, New Properties: %s",
self._refresh_rate, self.camera_properties) |
def transformer_base_vq_ada_32ex_packed():
"""Set of hyperparameters for lm1b packed following tpu params."""
hparams = transformer_base_v2()
expert_utils.update_hparams_for_vq_gating(hparams)
hparams.moe_num_experts = 32
hparams.gating_type = "vq"
# this gives us a batch size of 16 because each seq is len 256
hparams.batch_size = 5072
hparams.ffn_layer = "local_moe"
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_warmup_steps = 10000
# one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
hparams.learning_rate_decay_steps = 27200
hparams.num_heads = 4
hparams.num_blocks = 1
hparams.moe_k = 1
hparams.num_decoder_layers = 6
hparams.label_smoothing = 0.
hparams.layer_prepostprocess_dropout = 0.1
hparams.layer_postprocess_sequence = "dan"
hparams.layer_preprocess_sequence = "none"
hparams.weight_decay = 1e-06
hparams.attention_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.activation_dtype = "float32"
hparams.learning_rate = 0.1
hparams.learning_rate_constant = 1.0
return hparams | Set of hyperparameters for lm1b packed following tpu params. | Below is the instruction that describes the task:
### Input:
Set of hyperparameters for lm1b packed following tpu params.
### Response:
def transformer_base_vq_ada_32ex_packed():
"""Set of hyperparameters for lm1b packed following tpu params."""
hparams = transformer_base_v2()
expert_utils.update_hparams_for_vq_gating(hparams)
hparams.moe_num_experts = 32
hparams.gating_type = "vq"
# this gives us a batch size of 16 because each seq is len 256
hparams.batch_size = 5072
hparams.ffn_layer = "local_moe"
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_warmup_steps = 10000
# one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
hparams.learning_rate_decay_steps = 27200
hparams.num_heads = 4
hparams.num_blocks = 1
hparams.moe_k = 1
hparams.num_decoder_layers = 6
hparams.label_smoothing = 0.
hparams.layer_prepostprocess_dropout = 0.1
hparams.layer_postprocess_sequence = "dan"
hparams.layer_preprocess_sequence = "none"
hparams.weight_decay = 1e-06
hparams.attention_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.activation_dtype = "float32"
hparams.learning_rate = 0.1
hparams.learning_rate_constant = 1.0
return hparams |
def _append_national_number(self, national_number):
"""Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
"""
prefix_before_nn_len = len(self._prefix_before_national_number)
if (self._should_add_space_after_national_prefix and prefix_before_nn_len > 0 and
self._prefix_before_national_number[-1] != _SEPARATOR_BEFORE_NATIONAL_NUMBER):
# We want to add a space after the national prefix if the national
# prefix formatting rule indicates that this would normally be
# done, with the exception of the case where we already appended a
# space because the NDD was surprisingly long.
return self._prefix_before_national_number + _SEPARATOR_BEFORE_NATIONAL_NUMBER + national_number
else:
return self._prefix_before_national_number + national_number | Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable. | Below is the instruction that describes the task:
### Input:
Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
### Response:
def _append_national_number(self, national_number):
"""Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
"""
prefix_before_nn_len = len(self._prefix_before_national_number)
if (self._should_add_space_after_national_prefix and prefix_before_nn_len > 0 and
self._prefix_before_national_number[-1] != _SEPARATOR_BEFORE_NATIONAL_NUMBER):
# We want to add a space after the national prefix if the national
# prefix formatting rule indicates that this would normally be
# done, with the exception of the case where we already appended a
# space because the NDD was surprisingly long.
return self._prefix_before_national_number + _SEPARATOR_BEFORE_NATIONAL_NUMBER + national_number
else:
return self._prefix_before_national_number + national_number |
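The joining rule above can be shown with plain strings; the values below are illustrative, and the separator stands in for the module constant referenced in the code.

# Stand-alone sketch of the prefix/national-number joining rule above.
SEPARATOR = ' '                # stands in for _SEPARATOR_BEFORE_NATIONAL_NUMBER
prefix = '+44'                 # collected IDD/+ and country code (assumed)
national = '7912345678'        # digits entered so far (assumed)
should_add_space = True        # the formatting rule normally asks for a space here
if should_add_space and prefix and prefix[-1] != SEPARATOR:
    print(prefix + SEPARATOR + national)   # -> +44 7912345678
else:
    print(prefix + national)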
def Verify(self, mempool):
"""
Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise.
"""
if not super(ClaimTransaction, self).Verify(mempool):
return False
# wat does this do
# get all claim transactions from mempool list
# that are not this claim
# and gather all the claims of those claim transactions
# and see if they intersect the claims of this transaction
# and if that number is greater than zero that we do not verify
# (now, to do that in python)
# if (mempool.OfType < ClaimTransaction > ().Where(p => p != this).SelectMany(p= > p.Claims).Intersect(Claims).Count() > 0)
# return false;
# im sorry about the below
otherclaimTxs = [tx for tx in mempool if tx is ClaimTransaction and tx is not self]
for other in otherclaimTxs:
# check to see if the length of the intersection between this objects claim's and the other txs claims is > 0
if len([list(filter(lambda x: x in self.Claims, otherClaims)) for otherClaims in other.Claims]):
return False
txResult = None
for tx in self.GetTransactionResults():
if tx.AssetId == Blockchain.SystemCoin().Hash:
txResult = tx
break
if txResult is None or txResult.Amount > Fixed8(0):
return False
try:
return Blockchain.CalculateBonusIgnoreClaimed(self.Claims, False) == -txResult.Amount
except Exception as e:
logger.error('Could not calculate bonus: %s ' % e)
return False | Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise. | Below is the instruction that describes the task:
### Input:
Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise.
### Response:
def Verify(self, mempool):
"""
Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise.
"""
if not super(ClaimTransaction, self).Verify(mempool):
return False
# wat does this do
# get all claim transactions from mempool list
# that are not this claim
# and gather all the claims of those claim transactions
# and see if they intersect the claims of this transaction
# and if that number is greater than zero that we do not verify
# (now, to do that in python)
# if (mempool.OfType < ClaimTransaction > ().Where(p => p != this).SelectMany(p= > p.Claims).Intersect(Claims).Count() > 0)
# return false;
# im sorry about the below
otherclaimTxs = [tx for tx in mempool if tx is ClaimTransaction and tx is not self]
for other in otherclaimTxs:
# check to see if the length of the intersection between this objects claim's and the other txs claims is > 0
if len([list(filter(lambda x: x in self.Claims, otherClaims)) for otherClaims in other.Claims]):
return False
txResult = None
for tx in self.GetTransactionResults():
if tx.AssetId == Blockchain.SystemCoin().Hash:
txResult = tx
break
if txResult is None or txResult.Amount > Fixed8(0):
return False
try:
return Blockchain.CalculateBonusIgnoreClaimed(self.Claims, False) == -txResult.Amount
except Exception as e:
logger.error('Could not calculate bonus: %s ' % e)
return False |
def reintegrate(self, fullPointList):
'''
Integrates the pitch values of the accent into a larger pitch contour
'''
# Erase the original region of the accent
fullPointList = _deletePoints(fullPointList, self.minT, self.maxT)
# Erase the new region of the accent
fullPointList = self.deleteOverlapping(fullPointList)
# Add the accent into the full pitch list
outputPointList = fullPointList + self.pointList
outputPointList.sort()
return outputPointList | Integrates the pitch values of the accent into a larger pitch contour | Below is the instruction that describes the task:
### Input:
Integrates the pitch values of the accent into a larger pitch contour
### Response:
def reintegrate(self, fullPointList):
'''
Integrates the pitch values of the accent into a larger pitch contour
'''
# Erase the original region of the accent
fullPointList = _deletePoints(fullPointList, self.minT, self.maxT)
# Erase the new region of the accent
fullPointList = self.deleteOverlapping(fullPointList)
# Add the accent into the full pitch list
outputPointList = fullPointList + self.pointList
outputPointList.sort()
return outputPointList |
def pitch(times, frequencies, midi=False, unvoiced=False, ax=None, **kwargs):
'''Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
ax, _ = __get_axes(ax=ax)
times = np.asarray(times)
# First, segment into contiguously voiced contours
frequencies, voicings = freq_to_voicing(np.asarray(frequencies,
dtype=np.float))
# Here are all the change-points
v_changes = 1 + np.flatnonzero(voicings[1:] != voicings[:-1])
v_changes = np.unique(np.concatenate([[0], v_changes, [len(voicings)]]))
# Set up arrays of slices for voiced and unvoiced regions
v_slices, u_slices = [], []
for start, end in zip(v_changes, v_changes[1:]):
idx = slice(start, end)
# A region is voiced if its starting sample is voiced
# It's unvoiced if none of the samples in the region are voiced.
if voicings[start]:
v_slices.append(idx)
elif frequencies[idx].all():
u_slices.append(idx)
# Now we just need to plot the contour
style = dict()
style.update(next(ax._get_lines.prop_cycler))
style.update(kwargs)
if midi:
idx = frequencies > 0
frequencies[idx] = hz_to_midi(frequencies[idx])
# Tick at integer midi notes
ax.yaxis.set_minor_locator(MultipleLocator(1))
for idx in v_slices:
ax.plot(times[idx], frequencies[idx], **style)
style.pop('label', None)
# Plot the unvoiced portions
if unvoiced:
style['alpha'] = style.get('alpha', 1.0) * 0.5
for idx in u_slices:
ax.plot(times[idx], frequencies[idx], **style)
return ax | Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes | Below is the instruction that describes the task:
### Input:
Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
### Response:
def pitch(times, frequencies, midi=False, unvoiced=False, ax=None, **kwargs):
'''Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
ax, _ = __get_axes(ax=ax)
times = np.asarray(times)
# First, segment into contiguously voiced contours
frequencies, voicings = freq_to_voicing(np.asarray(frequencies,
dtype=float))
# Here are all the change-points
v_changes = 1 + np.flatnonzero(voicings[1:] != voicings[:-1])
v_changes = np.unique(np.concatenate([[0], v_changes, [len(voicings)]]))
# Set up arrays of slices for voiced and unvoiced regions
v_slices, u_slices = [], []
for start, end in zip(v_changes, v_changes[1:]):
idx = slice(start, end)
# A region is voiced if its starting sample is voiced
# It's unvoiced if none of the samples in the region are voiced.
if voicings[start]:
v_slices.append(idx)
elif frequencies[idx].all():
u_slices.append(idx)
# Now we just need to plot the contour
style = dict()
style.update(next(ax._get_lines.prop_cycler))
style.update(kwargs)
if midi:
idx = frequencies > 0
frequencies[idx] = hz_to_midi(frequencies[idx])
# Tick at integer midi notes
ax.yaxis.set_minor_locator(MultipleLocator(1))
for idx in v_slices:
ax.plot(times[idx], frequencies[idx], **style)
style.pop('label', None)
# Plot the unvoiced portions
if unvoiced:
style['alpha'] = style.get('alpha', 1.0) * 0.5
for idx in u_slices:
ax.plot(times[idx], frequencies[idx], **style)
return ax |
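A minimal usage sketch for the pitch helper above (its signature matches mir_eval.display.pitch); the contour below is invented purely for illustration, with negative values marking unvoiced frames.
import numpy as np
import matplotlib.pyplot as plt
times = np.arange(0, 2.0, 0.01)             # 10 ms hop over 2 seconds (made-up data)
freqs = np.full_like(times, 220.0)          # steady A3 contour
freqs[100:120] *= -1                        # negative sign => unvoiced frames
ax = pitch(times, freqs, midi=True, unvoiced=True)   # assumes the function above is importable
plt.show()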
def on_doctree_read(app, document):
"""
Hooks into Sphinx's ``doctree-read`` event.
"""
literal_blocks = uqbar.book.sphinx.collect_literal_blocks(document)
cache_mapping = uqbar.book.sphinx.group_literal_blocks_by_cache_path(literal_blocks)
node_mapping = {}
use_cache = bool(app.config["uqbar_book_use_cache"])
for cache_path, literal_block_groups in cache_mapping.items():
kwargs = dict(
extensions=app.uqbar_book_extensions,
setup_lines=app.config["uqbar_book_console_setup"],
teardown_lines=app.config["uqbar_book_console_teardown"],
use_black=bool(app.config["uqbar_book_use_black"]),
)
for literal_blocks in literal_block_groups:
try:
if use_cache:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks_with_cache(
literal_blocks, cache_path, app.connection, **kwargs
)
else:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks(
literal_blocks, **kwargs
)
node_mapping.update(local_node_mapping)
except ConsoleError as exception:
message = exception.args[0].splitlines()[-1]
logger.warning(message, location=exception.args[1])
if app.config["uqbar_book_strict"]:
raise
uqbar.book.sphinx.rebuild_document(document, node_mapping) | Hooks into Sphinx's ``doctree-read`` event. | Below is the instruction that describes the task:
### Input:
Hooks into Sphinx's ``doctree-read`` event.
### Response:
def on_doctree_read(app, document):
"""
Hooks into Sphinx's ``doctree-read`` event.
"""
literal_blocks = uqbar.book.sphinx.collect_literal_blocks(document)
cache_mapping = uqbar.book.sphinx.group_literal_blocks_by_cache_path(literal_blocks)
node_mapping = {}
use_cache = bool(app.config["uqbar_book_use_cache"])
for cache_path, literal_block_groups in cache_mapping.items():
kwargs = dict(
extensions=app.uqbar_book_extensions,
setup_lines=app.config["uqbar_book_console_setup"],
teardown_lines=app.config["uqbar_book_console_teardown"],
use_black=bool(app.config["uqbar_book_use_black"]),
)
for literal_blocks in literal_block_groups:
try:
if use_cache:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks_with_cache(
literal_blocks, cache_path, app.connection, **kwargs
)
else:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks(
literal_blocks, **kwargs
)
node_mapping.update(local_node_mapping)
except ConsoleError as exception:
message = exception.args[0].splitlines()[-1]
logger.warning(message, location=exception.args[1])
if app.config["uqbar_book_strict"]:
raise
uqbar.book.sphinx.rebuild_document(document, node_mapping) |
def list_storage_accounts_sub(access_token, subscription_id):
'''List the storage accounts in the specified subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body list of storage accounts.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Storage/storageAccounts',
'?api-version=', STORAGE_API])
return do_get(endpoint, access_token) | List the storage accounts in the specified subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body list of storage accounts. | Below is the instruction that describes the task:
### Input:
List the storage accounts in the specified subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body list of storage accounts.
### Response:
def list_storage_accounts_sub(access_token, subscription_id):
'''List the storage accounts in the specified subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body list of storage accounts.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Storage/storageAccounts',
'?api-version=', STORAGE_API])
return do_get(endpoint, access_token) |
def PilToRgb(pil):
'''Convert the color from a PIL-compatible integer to RGB.
Parameters:
pil: a PIL compatible color representation (0xBBGGRR)
Returns:
The color as an (r, g, b) tuple in the range:
r: [0...1]
g: [0...1]
b: [0...1]
>>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)
'(1, 0.501961, 0)'
'''
r = 0xff & pil
g = 0xff & (pil >> 8)
b = 0xff & (pil >> 16)
return tuple((v / 255.0 for v in (r, g, b))) | Convert the color from a PIL-compatible integer to RGB.
Parameters:
pil: a PIL compatible color representation (0xBBGGRR)
Returns:
The color as an (r, g, b) tuple in the range:
r: [0...1]
g: [0...1]
b: [0...1]
>>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)
'(1, 0.501961, 0)' | Below is the instruction that describes the task:
### Input:
Convert the color from a PIL-compatible integer to RGB.
Parameters:
pil: a PIL compatible color representation (0xBBGGRR)
Returns:
The color as an (r, g, b) tuple in the range:
r: [0...1]
g: [0...1]
b: [0...1]
>>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)
'(1, 0.501961, 0)'
### Response:
def PilToRgb(pil):
'''Convert the color from a PIL-compatible integer to RGB.
Parameters:
pil: a PIL compatible color representation (0xBBGGRR)
Returns:
The color as an (r, g, b) tuple in the range:
r: [0...1]
g: [0...1]
b: [0...1]
>>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)
'(1, 0.501961, 0)'
'''
r = 0xff & pil
g = 0xff & (pil >> 8)
b = 0xff & (pil >> 16)
return tuple((v / 255.0 for v in (r, g, b))) |
def build(self, builder):
"""Build XML object, return the root, this is a copy for consistency and testing"""
params = dict(ODMVersion="1.3",
FileType=self.filetype,
CreationDateTime=self.creationdatetime,
Originator=self.originator,
FileOID=self.fileoid,
xmlns="http://www.cdisc.org/ns/odm/v1.3",
)
if self.granularity_type:
params['Granularity'] = self.granularity_type.value
if self.source_system:
params['SourceSystem'] = self.source_system
if self.source_system_version:
params['SourceSystemVersion'] = self.source_system_version
params['xmlns:mdsol'] = "http://www.mdsol.com/ns/odm/metadata"
if self.description:
params['Description'] = self.description
builder.start("ODM", params)
# Ask the children
if self.study is not None:
self.study.build(builder)
if self.clinical_data:
for clinical_data in self.clinical_data:
clinical_data.build(builder)
if self.admindata is not None:
self.admindata.build(builder)
builder.end("ODM")
return builder.close() | Build XML object, return the root, this is a copy for consistency and testing | Below is the instruction that describes the task:
### Input:
Build XML object, return the root, this is a copy for consistency and testing
### Response:
def build(self, builder):
"""Build XML object, return the root, this is a copy for consistency and testing"""
params = dict(ODMVersion="1.3",
FileType=self.filetype,
CreationDateTime=self.creationdatetime,
Originator=self.originator,
FileOID=self.fileoid,
xmlns="http://www.cdisc.org/ns/odm/v1.3",
)
if self.granularity_type:
params['Granularity'] = self.granularity_type.value
if self.source_system:
params['SourceSystem'] = self.source_system
if self.source_system_version:
params['SourceSystemVersion'] = self.source_system_version
params['xmlns:mdsol'] = "http://www.mdsol.com/ns/odm/metadata"
if self.description:
params['Description'] = self.description
builder.start("ODM", params)
# Ask the children
if self.study is not None:
self.study.build(builder)
if self.clinical_data:
for clinical_data in self.clinical_data:
clinical_data.build(builder)
if self.admindata is not None:
self.admindata.build(builder)
builder.end("ODM")
return builder.close() |
def checkOptions(options, parser):
""" Check options, throw parser.error() if something goes wrong
"""
if options.jobStore == None:
parser.error("Specify --jobStore")
defaultCategories = ["time", "clock", "wait", "memory"]
if options.categories is None:
options.categories = defaultCategories
else:
options.categories = [x.lower() for x in options.categories.split(",")]
for c in options.categories:
if c not in defaultCategories:
parser.error("Unknown category %s. Must be from %s"
% (c, str(defaultCategories)))
extraSort = ["count", "alpha"]
if options.sortCategory is not None:
if (options.sortCategory not in defaultCategories and
options.sortCategory not in extraSort):
parser.error("Unknown --sortCategory %s. Must be from %s"
% (options.sortCategory,
str(defaultCategories + extraSort)))
sortFields = ["min", "med", "ave", "max", "total"]
if options.sortField is not None:
if (options.sortField not in sortFields):
parser.error("Unknown --sortField %s. Must be from %s"
% (options.sortField, str(sortFields))) | Check options, throw parser.error() if something goes wrong | Below is the instruction that describes the task:
### Input:
Check options, throw parser.error() if something goes wrong
### Response:
def checkOptions(options, parser):
""" Check options, throw parser.error() if something goes wrong
"""
if options.jobStore == None:
parser.error("Specify --jobStore")
defaultCategories = ["time", "clock", "wait", "memory"]
if options.categories is None:
options.categories = defaultCategories
else:
options.categories = [x.lower() for x in options.categories.split(",")]
for c in options.categories:
if c not in defaultCategories:
parser.error("Unknown category %s. Must be from %s"
% (c, str(defaultCategories)))
extraSort = ["count", "alpha"]
if options.sortCategory is not None:
if (options.sortCategory not in defaultCategories and
options.sortCategory not in extraSort):
parser.error("Unknown --sortCategory %s. Must be from %s"
% (options.sortCategory,
str(defaultCategories + extraSort)))
sortFields = ["min", "med", "ave", "max", "total"]
if options.sortField is not None:
if (options.sortField not in sortFields):
parser.error("Unknown --sortField %s. Must be from %s"
% (options.sortField, str(sortFields))) |
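A quick way to exercise checkOptions above without the surrounding CLI: build a throwaway argparse parser with the option names the function reads; the argument values here are arbitrary.
import argparse
parser = argparse.ArgumentParser()
for name in ("--jobStore", "--categories", "--sortCategory", "--sortField"):
    parser.add_argument(name)
options = parser.parse_args(["--jobStore", "./store", "--categories", "Time,Clock"])
checkOptions(options, parser)      # lower-cases and validates the categories
print(options.categories)          # -> ['time', 'clock']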
def run_program(self, command, working_directory=os.getcwd(),
environment=None, cleanup_files=True,
native_spec="-l cputype=intel"):
"""
Run a program through the grid, capturing the standard output.
"""
try:
s = drmaa.Session()
s.initialize()
jt = s.createJobTemplate()
jt.remoteCommand = os.path.dirname(
os.path.abspath(__file__)) + '/run_program.sh'
jt.args = [command]
if environment is not None:
jt.jobEnvironment = environment
jt.workingDirectory = working_directory
jt.nativeSpecification = native_spec
output_filename = os.path.join(working_directory, 'output.txt')
jt.outputPath = ':' + output_filename
jt.joinFiles = True
jobid = s.runJob(jt)
s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
with open(output_filename, 'r') as output:
stdout = output.read()
# Clean up
if cleanup_files:
os.remove(output_filename)
finally:
try:
s.control(drmaa.JOB_IDS_SESSION_ALL,
drmaa.JobControlAction.TERMINATE)
s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
s.exit()
except(drmaa.errors.NoActiveSessionException):
pass
return stdout | Run a program through the grid, capturing the standard output. | Below is the instruction that describes the task:
### Input:
Run a program through the grid, capturing the standard output.
### Response:
def run_program(self, command, working_directory=os.getcwd(),
environment=None, cleanup_files=True,
native_spec="-l cputype=intel"):
"""
Run a program through the grid, capturing the standard output.
"""
try:
s = drmaa.Session()
s.initialize()
jt = s.createJobTemplate()
jt.remoteCommand = os.path.dirname(
os.path.abspath(__file__)) + '/run_program.sh'
jt.args = [command]
if environment is not None:
jt.jobEnvironment = environment
jt.workingDirectory = working_directory
jt.nativeSpecification = native_spec
output_filename = os.path.join(working_directory, 'output.txt')
jt.outputPath = ':' + output_filename
jt.joinFiles = True
jobid = s.runJob(jt)
s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
with open(output_filename, 'r') as output:
stdout = output.read()
# Clean up
if cleanup_files:
os.remove(output_filename)
finally:
try:
s.control(drmaa.JOB_IDS_SESSION_ALL,
drmaa.JobControlAction.TERMINATE)
s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
s.exit()
except(drmaa.errors.NoActiveSessionException):
pass
return stdout |
def area(self):
r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the B |eacute| zier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`.
"""
if self._dimension != 2:
raise NotImplementedError(
"2D is the only supported dimension",
"Current dimension",
self._dimension,
)
edge1, edge2, edge3 = self._get_edges()
return _surface_helpers.compute_area(
(edge1._nodes, edge2._nodes, edge3._nodes)
) | r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the B |eacute| zier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`. | Below is the instruction that describes the task:
### Input:
r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the B |eacute| zier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`.
### Response:
def area(self):
r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the B |eacute| zier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`.
"""
if self._dimension != 2:
raise NotImplementedError(
"2D is the only supported dimension",
"Current dimension",
self._dimension,
)
edge1, edge2, edge3 = self._get_edges()
return _surface_helpers.compute_area(
(edge1._nodes, edge2._nodes, edge3._nodes)
) |
def extract_header(msg_or_header):
"""Given a message or header, return the header."""
if not msg_or_header:
return {}
try:
# See if msg_or_header is the entire message.
h = msg_or_header['header']
except KeyError:
try:
# See if msg_or_header is just the header
h = msg_or_header['msg_id']
except KeyError:
raise
else:
h = msg_or_header
if not isinstance(h, dict):
h = dict(h)
return h | Given a message or header, return the header. | Below is the instruction that describes the task:
### Input:
Given a message or header, return the header.
### Response:
def extract_header(msg_or_header):
"""Given a message or header, return the header."""
if not msg_or_header:
return {}
try:
# See if msg_or_header is the entire message.
h = msg_or_header['header']
except KeyError:
try:
# See if msg_or_header is just the header
h = msg_or_header['msg_id']
except KeyError:
raise
else:
h = msg_or_header
if not isinstance(h, dict):
h = dict(h)
return h |
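Because extract_header only inspects dictionary keys, it can be exercised without any kernel machinery; the message layout below is a guess at the Jupyter-style message shape this helper was written for.
msg = {"header": {"msg_id": "abc123", "msg_type": "execute_request"},
       "content": {"code": "1 + 1"}}
print(extract_header(msg))    # -> {'msg_id': 'abc123', 'msg_type': 'execute_request'}
print(extract_header(None))   # falsy input -> {}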
def readinto(self, buf, *, start=0, end=None):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
if end is None:
end = len(buf)
for i in range(start, end):
buf[i] = self._readbyte() | Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include | Below is the instruction that describes the task:
### Input:
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
### Response:
def readinto(self, buf, *, start=0, end=None):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
if end is None:
end = len(buf)
for i in range(start, end):
buf[i] = self._readbyte() |
def parcor_stable(filt):
"""
Tests whether the given filter is stable or not by using the partial
correlation coefficients (reflection coefficients) of the given filter.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when all correlation coefficients are inside the
unit circle. Critical stability (i.e., when outer coefficient has magnitude
equal to one) is seen as an instability, and returns False.
See Also
--------
parcor :
Partial correlation coefficients generator.
lsf_stable :
Tests filter stability with Line Spectral Frequencies (LSF) values.
"""
try:
return all(abs(k) < 1 for k in parcor(ZFilter(filt.denpoly)))
except ParCorError:
return False | Tests whether the given filter is stable or not by using the partial
correlation coefficients (reflection coefficients) of the given filter.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when all correlation coefficients are inside the
unit circle. Critical stability (i.e., when outer coefficient has magnitude
equal to one) is seen as an instability, and returns False.
See Also
--------
parcor :
Partial correlation coefficients generator.
lsf_stable :
Tests filter stability with Line Spectral Frequencies (LSF) values. | Below is the instruction that describes the task:
### Input:
Tests whether the given filter is stable or not by using the partial
correlation coefficients (reflection coefficients) of the given filter.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when all correlation coefficients are inside the
unit circle. Critical stability (i.e., when outer coefficient has magnitude
equal to one) is seen as an instability, and returns False.
See Also
--------
parcor :
Partial correlation coefficients generator.
lsf_stable :
Tests filter stability with Line Spectral Frequencies (LSF) values.
### Response:
def parcor_stable(filt):
"""
Tests whether the given filter is stable or not by using the partial
correlation coefficients (reflection coefficients) of the given filter.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when all correlation coefficients are inside the
unit circle. Critical stability (i.e., when outer coefficient has magnitude
equal to one) is seen as an instability, and returns False.
See Also
--------
parcor :
Partial correlation coefficients generator.
lsf_stable :
Tests filter stability with Line Spectral Frequencies (LSF) values.
"""
try:
return all(abs(k) < 1 for k in parcor(ZFilter(filt.denpoly)))
except ParCorError:
return False |
def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model.
"""
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] | Returns the module holding the conversion functions for a
particular model. | Below is the instruction that describes the task:
### Input:
Returns the module holding the conversion functions for a
particular model.
### Response:
def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model.
"""
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] |
def set_intersection(self, division, intersection):
"""Set intersection percentage of intersecting divisions."""
IntersectRelationship.objects.filter(
from_division=self, to_division=division
).update(intersection=intersection) | Set intersection percentage of intersecting divisions. | Below is the instruction that describes the task:
### Input:
Set intersection percentage of intersecting divisions.
### Response:
def set_intersection(self, division, intersection):
"""Set intersection percentage of intersecting divisions."""
IntersectRelationship.objects.filter(
from_division=self, to_division=division
).update(intersection=intersection) |
def import_attr(path):
"""
transform a python dotted path to the attr
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged
"""
# if we got a str, decode it to unicode (normally it should only contain ascii)
if isinstance(path, six.binary_type):
path = path.decode("utf-8")
# if path is not an unicode, return it unchanged (may be it is already the attribute to import)
if not isinstance(path, six.text_type):
return path
if u"." not in path:
raise ValueError("%r should be of the form `module.attr` and we just got `attr`" % path)
module, attr = path.rsplit(u'.', 1)
try:
return getattr(import_module(module), attr)
except ImportError:
raise ImportError("Module %r not found" % module)
except AttributeError:
raise AttributeError("Module %r has no attribute %r" % (module, attr)) | transform a python dotted path to the attr
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged | Below is the instruction that describes the task:
### Input:
transform a python dotted path to the attr
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged
### Response:
def import_attr(path):
"""
transform a python dotted path to the attr
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged
"""
# if we got a str, decode it to unicode (normally it should only contain ascii)
if isinstance(path, six.binary_type):
path = path.decode("utf-8")
# if path is not an unicode, return it unchanged (may be it is already the attribute to import)
if not isinstance(path, six.text_type):
return path
if u"." not in path:
raise ValueError("%r should be of the form `module.attr` and we just got `attr`" % path)
module, attr = path.rsplit(u'.', 1)
try:
return getattr(import_module(module), attr)
except ImportError:
raise ImportError("Module %r not found" % module)
except AttributeError:
raise AttributeError("Module %r has no attribute %r" % (module, attr))
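A short illustration of the dotted-path helper above; os.path.join is used only because it is always importable, and the behaviour for non-string input follows directly from the early return in the code.
join = import_attr("os.path.join")   # resolves the module, then the attribute
print(join("a", "b"))                # -> 'a/b' (or 'a\\b' on Windows)
print(import_attr(42))               # non-text input is returned unchanged -> 42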
def mapping_get(index, doc_type, hosts=None, profile=None):
'''
Retrieve mapping definition of index or index/type
index
Index for the mapping
doc_type
Name of the document type
CLI example::
salt myminion elasticsearch.mapping_get testindex user
'''
es = _get_instance(hosts, profile)
try:
return es.indices.get_mapping(index=index, doc_type=doc_type)
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot retrieve mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error)) | Retrieve mapping definition of index or index/type
index
Index for the mapping
doc_type
Name of the document type
CLI example::
salt myminion elasticsearch.mapping_get testindex user | Below is the instruction that describes the task:
### Input:
Retrieve mapping definition of index or index/type
index
Index for the mapping
doc_type
Name of the document type
CLI example::
salt myminion elasticsearch.mapping_get testindex user
### Response:
def mapping_get(index, doc_type, hosts=None, profile=None):
'''
Retrieve mapping definition of index or index/type
index
Index for the mapping
doc_type
Name of the document type
CLI example::
salt myminion elasticsearch.mapping_get testindex user
'''
es = _get_instance(hosts, profile)
try:
return es.indices.get_mapping(index=index, doc_type=doc_type)
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot retrieve mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error)) |
def update_exc(exc, msg, before=True, separator="\n"):
"""
Adds additional text to an exception's error message.
The new text will be added before the existing text by default; to append
it after the original text, pass False to the `before` parameter.
By default the old and new text will be separated by a newline. If you wish
to use a different separator, pass that as the `separator` parameter.
"""
emsg = exc.message
if before:
parts = (msg, separator, emsg)
else:
parts = (emsg, separator, msg)
new_msg = "%s%s%s" % parts
new_args = (new_msg, ) + exc.args[1:]
exc.message = new_msg
exc.args = new_args
return exc | Adds additional text to an exception's error message.
The new text will be added before the existing text by default; to append
it after the original text, pass False to the `before` parameter.
By default the old and new text will be separated by a newline. If you wish
to use a different separator, pass that as the `separator` parameter. | Below is the instruction that describes the task:
### Input:
Adds additional text to an exception's error message.
The new text will be added before the existing text by default; to append
it after the original text, pass False to the `before` parameter.
By default the old and new text will be separated by a newline. If you wish
to use a different separator, pass that as the `separator` parameter.
### Response:
def update_exc(exc, msg, before=True, separator="\n"):
"""
Adds additional text to an exception's error message.
The new text will be added before the existing text by default; to append
it after the original text, pass False to the `before` parameter.
By default the old and new text will be separated by a newline. If you wish
to use a different separator, pass that as the `separator` parameter.
"""
emsg = exc.message
if before:
parts = (msg, separator, emsg)
else:
parts = (emsg, separator, msg)
new_msg = "%s%s%s" % parts
new_args = (new_msg, ) + exc.args[1:]
exc.message = new_msg
exc.args = new_args
return exc |
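update_exc reads and writes exc.message, an attribute standard exceptions only carry on Python 2; the sketch below assumes Python 3 and sets the attribute by hand before calling the helper.
try:
    raise IOError("disk full")
except IOError as exc:
    exc.message = exc.args[0]                      # emulate the Python 2 attribute
    exc = update_exc(exc, "while writing the cache:")
    print(exc.args[0])                             # -> 'while writing the cache:\ndisk full'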
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register | Decorator for register function that adds an appropriate inline. | Below is the instruction that describes the task:
### Input:
Decorator for register function that adds an appropriate inline.
### Response:
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register |
def expand_alias(self, line):
""" Expand an alias in the command line
Returns the provided command line, possibly with the first word
(command) translated according to alias expansion rules.
[ipython]|16> _ip.expand_aliases("np myfile.txt")
<16> 'q:/opt/np/notepad++.exe myfile.txt'
"""
pre,_,fn,rest = split_user_input(line)
res = pre + self.expand_aliases(fn, rest)
return res | Expand an alias in the command line
Returns the provided command line, possibly with the first word
(command) translated according to alias expansion rules.
[ipython]|16> _ip.expand_aliases("np myfile.txt")
<16> 'q:/opt/np/notepad++.exe myfile.txt' | Below is the instruction that describes the task:
### Input:
Expand an alias in the command line
Returns the provided command line, possibly with the first word
(command) translated according to alias expansion rules.
[ipython]|16> _ip.expand_aliases("np myfile.txt")
<16> 'q:/opt/np/notepad++.exe myfile.txt'
### Response:
def expand_alias(self, line):
""" Expand an alias in the command line
Returns the provided command line, possibly with the first word
(command) translated according to alias expansion rules.
[ipython]|16> _ip.expand_aliases("np myfile.txt")
<16> 'q:/opt/np/notepad++.exe myfile.txt'
"""
pre,_,fn,rest = split_user_input(line)
res = pre + self.expand_aliases(fn, rest)
return res |
def write(self, label, index):
"""
Saves a new label, index mapping to the cache.
Raises a RuntimeError on a conflict.
"""
if label in self.cache:
if self.cache[label] != index:
error_message = 'cache_conflict on label: {} with index: {}\ncache dump: {}'.format(label, index, self.cache)
raise RuntimeError(error_message)
else:
self.cache[label] = index | Saves a new label, index mapping to the cache.
Raises a RuntimeError on a conflict. | Below is the instruction that describes the task:
### Input:
Saves a new label, index mapping to the cache.
Raises a RuntimeError on a conflict.
### Response:
def write(self, label, index):
"""
Saves a new label, index mapping to the cache.
Raises a RuntimeError on a conflict.
"""
if label in self.cache:
if self.cache[label] != index:
error_message = 'cache_conflict on label: {} with index: {}\ncache dump: {}'.format(label, index, self.cache)
raise RuntimeError(error_message)
else:
self.cache[label] = index |
def send_signal(self, s):
"""
Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
"""
self._get_signal_event(s) # Check if signal has been enabled
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running.')
os.kill(pid, s) | Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised. | Below is the instruction that describes the task:
### Input:
Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
### Response:
def send_signal(self, s):
"""
Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
"""
self._get_signal_event(s) # Check if signal has been enabled
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running.')
os.kill(pid, s) |
def catch_osd_errors(conn, logger, args):
"""
Look for possible issues when checking the status of an OSD and
report them back to the user.
"""
logger.info('checking OSD status...')
status = osd_status_check(conn, args.cluster)
osds = int(status.get('num_osds', 0))
up_osds = int(status.get('num_up_osds', 0))
in_osds = int(status.get('num_in_osds', 0))
full = status.get('full', False)
nearfull = status.get('nearfull', False)
if osds > up_osds:
difference = osds - up_osds
logger.warning('there %s %d OSD%s down' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if osds > in_osds:
difference = osds - in_osds
logger.warning('there %s %d OSD%s out' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if full:
logger.warning('OSDs are full!')
if nearfull:
logger.warning('OSDs are near full!') | Look for possible issues when checking the status of an OSD and
report them back to the user. | Below is the instruction that describes the task:
### Input:
Look for possible issues when checking the status of an OSD and
report them back to the user.
### Response:
def catch_osd_errors(conn, logger, args):
"""
Look for possible issues when checking the status of an OSD and
report them back to the user.
"""
logger.info('checking OSD status...')
status = osd_status_check(conn, args.cluster)
osds = int(status.get('num_osds', 0))
up_osds = int(status.get('num_up_osds', 0))
in_osds = int(status.get('num_in_osds', 0))
full = status.get('full', False)
nearfull = status.get('nearfull', False)
if osds > up_osds:
difference = osds - up_osds
logger.warning('there %s %d OSD%s down' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if osds > in_osds:
difference = osds - in_osds
logger.warning('there %s %d OSD%s out' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if full:
logger.warning('OSDs are full!')
if nearfull:
logger.warning('OSDs are near full!') |
def run(self):
"""主函数"""
# try:
self.fenum.write('\n')
self.fcpp = open(os.path.join(os.path.abspath(self.ctp_dir), 'ThostFtdcUserApiDataType.h'), 'r')
for idx, line in enumerate(self.fcpp):
l = self.process_line(idx, line)
self.f_data_type.write(l)
self.fcpp.close()
self.f_data_type.close()
self.fenum.close()
print('ctp_data_type.py生成过程完成') | Main function | Below is the instruction that describes the task:
### Input:
Main function
### Response:
def run(self):
"""主函数"""
# try:
self.fenum.write('\n')
self.fcpp = open(os.path.join(os.path.abspath(self.ctp_dir), 'ThostFtdcUserApiDataType.h'), 'r')
for idx, line in enumerate(self.fcpp):
l = self.process_line(idx, line)
self.f_data_type.write(l)
self.fcpp.close()
self.f_data_type.close()
self.fenum.close()
print('ctp_data_type.py生成过程完成') |
def show_vmatrix(vm):
'''
d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
vm = [[[222]], ['b']]
show_vmatrix(vm)
'''
unhandled = vm
while(unhandled.__len__()>0):
next_unhandled = []
for i in range(0,unhandled.__len__()):
ele = unhandled[i]
print(ele)
cond = elel.is_leaf(ele)
if(cond):
pass
else:
children = ele[0]
next_unhandled.append(children)
unhandled = next_unhandled | d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
vm = [[[222]], ['b']]
show_vmatrix(vm) | Below is the instruction that describes the task:
### Input:
d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
vm = [[[222]], ['b']]
show_vmatrix(vm)
### Response:
def show_vmatrix(vm):
'''
d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
vm = [[[222]], ['b']]
show_vmatrix(vm)
'''
unhandled = vm
while(unhandled.__len__()>0):
next_unhandled = []
for i in range(0,unhandled.__len__()):
ele = unhandled[i]
print(ele)
cond = elel.is_leaf(ele)
if(cond):
pass
else:
children = ele[0]
next_unhandled.append(children)
unhandled = next_unhandled |
def restore(self):
"""Restores the modules that the saver knows about into
sys.modules.
"""
try:
for modname, mod in self._saved.items():
if mod is not None:
sys.modules[modname] = mod
else:
try:
del sys.modules[modname]
except KeyError:
pass
finally:
imp.release_lock() | Restores the modules that the saver knows about into
sys.modules. | Below is the instruction that describes the task:
### Input:
Restores the modules that the saver knows about into
sys.modules.
### Response:
def restore(self):
"""Restores the modules that the saver knows about into
sys.modules.
"""
try:
for modname, mod in self._saved.items():
if mod is not None:
sys.modules[modname] = mod
else:
try:
del sys.modules[modname]
except KeyError:
pass
finally:
imp.release_lock() |
def vrel(v1, v2):
"""
Return the relative difference between two 3-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrel_c.html
:param v1: First vector
:type v1: 3-Element Array of floats
:param v2: Second vector
:type v2: 3-Element Array of floats
:return: the relative difference between v1 and v2.
:rtype: float
"""
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
return libspice.vrel_c(v1, v2) | Return the relative difference between two 3-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrel_c.html
:param v1: First vector
:type v1: 3-Element Array of floats
:param v2: Second vector
:type v2: 3-Element Array of floats
:return: the relative difference between v1 and v2.
:rtype: float | Below is the instruction that describes the task:
### Input:
Return the relative difference between two 3-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrel_c.html
:param v1: First vector
:type v1: 3-Element Array of floats
:param v2: Second vector
:type v2: 3-Element Array of floats
:return: the relative difference between v1 and v2.
:rtype: float
### Response:
def vrel(v1, v2):
"""
Return the relative difference between two 3-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrel_c.html
:param v1: First vector
:type v1: 3-Element Array of floats
:param v2: Second vector
:type v2: 3-Element Array of floats
:return: the relative difference between v1 and v2.
:rtype: float
"""
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
return libspice.vrel_c(v1, v2) |
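vrel is a thin wrapper around CSPICE's vrel_c (SpiceyPy exposes it as spiceypy.vrel), so a usage sketch only needs two nearby vectors; the numbers below are arbitrary.
v1 = [1.0, 0.0, 0.0]
v2 = [1.0, 1.0e-6, 0.0]
print(vrel(v1, v2))     # relative difference, roughly 1e-6 for these inputs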
def Normalize(str_):
"""The Normalize(str) function.
This one also accepts Unicode string input (in the RFC only UTF-8
strings are used).
"""
# pylint: disable=C0103
if isinstance(str_, bytes):
str_ = str_.decode("utf-8")
return SASLPREP.prepare(str_).encode("utf-8") | The Normalize(str) function.
This one also accepts Unicode string input (in the RFC only UTF-8
strings are used). | Below is the instruction that describes the task:
### Input:
The Normalize(str) function.
This one also accepts Unicode string input (in the RFC only UTF-8
strings are used).
### Response:
def Normalize(str_):
"""The Normalize(str) function.
This one also accepts Unicode string input (in the RFC only UTF-8
strings are used).
"""
# pylint: disable=C0103
if isinstance(str_, bytes):
str_ = str_.decode("utf-8")
return SASLPREP.prepare(str_).encode("utf-8") |
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error} | Set errors markup. | Below is the instruction that describes the task:
### Input:
Set errors markup.
### Response:
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error} |
def _get_output_columns(nodes, context):
"""Get the output columns for a list of SqlNodes.
Args:
nodes: List[SqlNode], the nodes to get output columns from.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Column], list of SqlAlchemy Columns to output for this query.
"""
columns = []
for node in nodes:
for sql_output in sql_context_helpers.get_outputs(node, context):
field_name = sql_output.field_name
column = sql_context_helpers.get_column(field_name, node, context)
column = column.label(sql_output.output_name)
columns.append(column)
return columns | Get the output columns for a list of SqlNodes.
Args:
nodes: List[SqlNode], the nodes to get output columns from.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Column], list of SqlAlchemy Columns to output for this query. | Below is the instruction that describes the task:
### Input:
Get the output columns for a list of SqlNodes.
Args:
nodes: List[SqlNode], the nodes to get output columns from.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Column], list of SqlAlchemy Columns to output for this query.
### Response:
def _get_output_columns(nodes, context):
"""Get the output columns for a list of SqlNodes.
Args:
nodes: List[SqlNode], the nodes to get output columns from.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Column], list of SqlAlchemy Columns to output for this query.
"""
columns = []
for node in nodes:
for sql_output in sql_context_helpers.get_outputs(node, context):
field_name = sql_output.field_name
column = sql_context_helpers.get_column(field_name, node, context)
column = column.label(sql_output.output_name)
columns.append(column)
return columns |
def get_changeset(args):
"""Dump the changeset objects as JSON, reading the provided bundle YAML.
The YAML can be provided either from stdin or by passing a file path as
first argument.
"""
# Parse the arguments.
parser = argparse.ArgumentParser(description=get_changeset.__doc__)
parser.add_argument(
'infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='path to the bundle YAML file')
parser.add_argument(
'--version', action='version', version='%(prog)s {}'.format(version))
options = parser.parse_args(args)
# Parse the provided YAML file.
try:
bundle = yaml.safe_load(options.infile)
except Exception:
return 'error: the provided bundle is not a valid YAML'
# Validate the bundle object.
errors = validation.validate(bundle)
if errors:
return '\n'.join(errors)
# Dump the changeset to stdout.
print('[')
for num, change in enumerate(changeset.parse(bundle)):
if num:
print(',')
print(json.dumps(change))
print(']') | Dump the changeset objects as JSON, reading the provided bundle YAML.
The YAML can be provided either from stdin or by passing a file path as
first argument. | Below is the instruction that describes the task:
### Input:
Dump the changeset objects as JSON, reading the provided bundle YAML.
The YAML can be provided either from stdin or by passing a file path as
first argument.
### Response:
def get_changeset(args):
"""Dump the changeset objects as JSON, reading the provided bundle YAML.
The YAML can be provided either from stdin or by passing a file path as
first argument.
"""
# Parse the arguments.
parser = argparse.ArgumentParser(description=get_changeset.__doc__)
parser.add_argument(
'infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='path to the bundle YAML file')
parser.add_argument(
'--version', action='version', version='%(prog)s {}'.format(version))
options = parser.parse_args(args)
# Parse the provided YAML file.
try:
bundle = yaml.safe_load(options.infile)
except Exception:
return 'error: the provided bundle is not a valid YAML'
# Validate the bundle object.
errors = validation.validate(bundle)
if errors:
return '\n'.join(errors)
# Dump the changeset to stdout.
print('[')
for num, change in enumerate(changeset.parse(bundle)):
if num:
print(',')
print(json.dumps(change))
print(']') |
def on_lxml_loads(self, lxml, config, content, **kwargs):
""" The `lxml <https://pypi.org/project/lxml/>`_ loads method.
:param module lxml: The ``lxml`` module
:param class config: The loading config class
:param str content: The content to deserialize
:param str encoding: The encoding to read the given xml document as, defaults to
"utf-8", optional
:returns: The deserialized dictionary
:rtype: dict
"""
# NOTE: lazy import of XMLParser because class requires lxml to exist on import
from ..contrib.xml_parser import XMLParser
return XMLParser.from_xml(
content, encoding=kwargs.pop("encoding", "utf-8")
).to_dict() | The `lxml <https://pypi.org/project/lxml/>`_ loads method.
:param module lxml: The ``lxml`` module
:param class config: The loading config class
:param str content: The content to deserialize
:param str encoding: The encoding to read the given xml document as, defaults to
"utf-8", optional
:returns: The deserialized dictionary
:rtype: dict | Below is the instruction that describes the task:
### Input:
The `lxml <https://pypi.org/project/lxml/>`_ loads method.
:param module lxml: The ``lxml`` module
:param class config: The loading config class
:param str content: The content to deserialize
:param str encoding: The encoding to read the given xml document as, defaults to
"utf-8", optional
:returns: The deserialized dictionary
:rtype: dict
### Response:
def on_lxml_loads(self, lxml, config, content, **kwargs):
""" The `lxml <https://pypi.org/project/lxml/>`_ loads method.
:param module lxml: The ``lxml`` module
:param class config: The loading config class
:param str content: The content to deserialize
:param str encoding: The encoding to read the given xml document as, defaults to
"utf-8", optional
:returns: The deserialized dictionary
:rtype: dict
"""
# NOTE: lazy import of XMLParser because class requires lxml to exist on import
from ..contrib.xml_parser import XMLParser
return XMLParser.from_xml(
content, encoding=kwargs.pop("encoding", "utf-8")
).to_dict() |
async def send_notification(self, title, message):
"""Send notification."""
query = gql(
"""
mutation{
sendPushNotification(input: {
title: "%s",
message: "%s",
}){
successful
pushedToNumberOfDevices
}
}
"""
% (title, message)
)
res = await self.execute(query)
if not res:
return False
noti = res.get("sendPushNotification", {})
successful = noti.get("successful", False)
pushed_to_number_of_devices = noti.get("pushedToNumberOfDevices", 0)
_LOGGER.debug(
"send_notification: status %s, send to %s devices",
successful,
pushed_to_number_of_devices,
)
return successful | Send notification. | Below is the instruction that describes the task:
### Input:
Send notification.
### Response:
async def send_notification(self, title, message):
"""Send notification."""
query = gql(
"""
mutation{
sendPushNotification(input: {
title: "%s",
message: "%s",
}){
successful
pushedToNumberOfDevices
}
}
"""
% (title, message)
)
res = await self.execute(query)
if not res:
return False
noti = res.get("sendPushNotification", {})
successful = noti.get("successful", False)
pushed_to_number_of_devices = noti.get("pushedToNumberOfDevices", 0)
_LOGGER.debug(
"send_notification: status %s, send to %s devices",
successful,
pushed_to_number_of_devices,
)
return successful |
def ProcessBlocks(self, block_limit=1000):
"""
Method called on a loop to check the current height of the blockchain. If the height of the blockchain
is more than the current stored height in the wallet, we get the next block in line and
process it.
In the case that the wallet height is far behind the height of the blockchain, we do this 1000
blocks at a time.
Args:
block_limit (int): the number of blocks to process synchronously. defaults to 1000. set to 0 to block until the wallet is fully rebuilt.
"""
self._lock.acquire()
try:
blockcount = 0
while self._current_height <= Blockchain.Default().Height and (block_limit == 0 or blockcount < block_limit):
block = Blockchain.Default().GetBlockByHeight(self._current_height)
if block is not None:
self.ProcessNewBlock(block)
else:
self._current_height += 1
blockcount += 1
self.SaveStoredData("Height", self._current_height)
except Exception as e:
logger.warn("Could not process ::: %s " % e)
finally:
self._lock.release() | Method called on a loop to check the current height of the blockchain. If the height of the blockchain
is more than the current stored height in the wallet, we get the next block in line and
process it.
In the case that the wallet height is far behind the height of the blockchain, we do this 1000
blocks at a time.
Args:
block_limit (int): the number of blocks to process synchronously. defaults to 1000. set to 0 to block until the wallet is fully rebuilt. | Below is the instruction that describes the task:
### Input:
Method called on a loop to check the current height of the blockchain. If the height of the blockchain
is more than the current stored height in the wallet, we get the next block in line and
process it.
In the case that the wallet height is far behind the height of the blockchain, we do this 1000
blocks at a time.
Args:
block_limit (int): the number of blocks to process synchronously. defaults to 1000. set to 0 to block until the wallet is fully rebuilt.
### Response:
def ProcessBlocks(self, block_limit=1000):
"""
Method called on a loop to check the current height of the blockchain. If the height of the blockchain
is more than the current stored height in the wallet, we get the next block in line and
process it.
In the case that the wallet height is far behind the height of the blockchain, we do this 1000
blocks at a time.
Args:
block_limit (int): the number of blocks to process synchronously. defaults to 1000. set to 0 to block until the wallet is fully rebuilt.
"""
self._lock.acquire()
try:
blockcount = 0
while self._current_height <= Blockchain.Default().Height and (block_limit == 0 or blockcount < block_limit):
block = Blockchain.Default().GetBlockByHeight(self._current_height)
if block is not None:
self.ProcessNewBlock(block)
else:
self._current_height += 1
blockcount += 1
self.SaveStoredData("Height", self._current_height)
except Exception as e:
logger.warn("Could not process ::: %s " % e)
finally:
self._lock.release() |
def GetFormatterObject(cls, data_type):
"""Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available.
"""
data_type = data_type.lower()
if data_type not in cls._formatter_objects:
formatter_object = None
if data_type in cls._formatter_classes:
formatter_class = cls._formatter_classes[data_type]
# TODO: remove the need to instantiate the Formatter classes
# and use class methods only.
formatter_object = formatter_class()
if not formatter_object:
logger.warning(
'Using default formatter for data type: {0:s}'.format(data_type))
formatter_object = default.DefaultFormatter()
cls._formatter_objects[data_type] = formatter_object
return cls._formatter_objects[data_type] | Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available. | Below is the instruction that describes the task:
### Input:
Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available.
### Response:
def GetFormatterObject(cls, data_type):
"""Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available.
"""
data_type = data_type.lower()
if data_type not in cls._formatter_objects:
formatter_object = None
if data_type in cls._formatter_classes:
formatter_class = cls._formatter_classes[data_type]
# TODO: remove the need to instantiate the Formatter classes
# and use class methods only.
formatter_object = formatter_class()
if not formatter_object:
logger.warning(
'Using default formatter for data type: {0:s}'.format(data_type))
formatter_object = default.DefaultFormatter()
cls._formatter_objects[data_type] = formatter_object
return cls._formatter_objects[data_type] |
def _get_closest_matches(input_attributes, target_attributes):
"""
:param input_attributes: First dictionary of objects to attribute tuples.
:param target_attributes: Second dictionary of blocks to attribute tuples.
:returns: A dictionary of objects in the input_attributes to the closest objects in the
target_attributes.
"""
closest_matches = {}
# for each object in the first set find the objects with the closest target attributes
for a in input_attributes:
best_dist = float('inf')
best_matches = []
for b in target_attributes:
dist = _euclidean_dist(input_attributes[a], target_attributes[b])
if dist < best_dist:
best_matches = [b]
best_dist = dist
elif dist == best_dist:
best_matches.append(b)
closest_matches[a] = best_matches
return closest_matches | :param input_attributes: First dictionary of objects to attribute tuples.
:param target_attributes: Second dictionary of blocks to attribute tuples.
:returns: A dictionary of objects in the input_attributes to the closest objects in the
target_attributes. | Below is the the instruction that describes the task:
### Input:
:param input_attributes: First dictionary of objects to attribute tuples.
:param target_attributes: Second dictionary of blocks to attribute tuples.
:returns: A dictionary of objects in the input_attributes to the closest objects in the
target_attributes.
### Response:
def _get_closest_matches(input_attributes, target_attributes):
"""
:param input_attributes: First dictionary of objects to attribute tuples.
:param target_attributes: Second dictionary of blocks to attribute tuples.
:returns: A dictionary of objects in the input_attributes to the closest objects in the
target_attributes.
"""
closest_matches = {}
# for each object in the first set find the objects with the closest target attributes
for a in input_attributes:
best_dist = float('inf')
best_matches = []
for b in target_attributes:
dist = _euclidean_dist(input_attributes[a], target_attributes[b])
if dist < best_dist:
best_matches = [b]
best_dist = dist
elif dist == best_dist:
best_matches.append(b)
closest_matches[a] = best_matches
return closest_matches |
def _encode(self, data, algorithm, key=None):
'''Encode data with specific algorithm'''
if algorithm['type'] == 'hmac':
return data + self._hmac_generate(data, algorithm, key)
elif algorithm['type'] == 'aes':
return self._aes_encrypt(data, algorithm, key)
elif algorithm['type'] == 'no-serialization':
return data
elif algorithm['type'] == 'json':
return json.dumps(data)
elif algorithm['type'] == 'no-compression':
return data
elif algorithm['type'] == 'gzip':
return self._zlib_compress(data, algorithm)
else:
raise Exception('Algorithm not supported: %s' % algorithm['type']) | Encode data with specific algorithm | Below is the the instruction that describes the task:
### Input:
Encode data with specific algorithm
### Response:
def _encode(self, data, algorithm, key=None):
'''Encode data with specific algorithm'''
if algorithm['type'] == 'hmac':
return data + self._hmac_generate(data, algorithm, key)
elif algorithm['type'] == 'aes':
return self._aes_encrypt(data, algorithm, key)
elif algorithm['type'] == 'no-serialization':
return data
elif algorithm['type'] == 'json':
return json.dumps(data)
elif algorithm['type'] == 'no-compression':
return data
elif algorithm['type'] == 'gzip':
return self._zlib_compress(data, algorithm)
else:
raise Exception('Algorithm not supported: %s' % algorithm['type']) |
def _html_to_img_tuples(html:str, format:str='jpg', n_images:int=10) -> list:
"Parse the google images html to img tuples containining `(fname, url)`"
bs = BeautifulSoup(html, 'html.parser')
img_tags = bs.find_all('div', {'class': 'rg_meta'})
metadata_dicts = (json.loads(e.text) for e in img_tags)
img_tuples = ((_img_fname(d['ou']), d['ou']) for d in metadata_dicts if d['ity'] == format)
return list(itertools.islice(img_tuples, n_images)) | Parse the google images html to img tuples containining `(fname, url)` | Below is the the instruction that describes the task:
### Input:
Parse the google images html to img tuples containining `(fname, url)`
### Response:
def _html_to_img_tuples(html:str, format:str='jpg', n_images:int=10) -> list:
"Parse the google images html to img tuples containining `(fname, url)`"
bs = BeautifulSoup(html, 'html.parser')
img_tags = bs.find_all('div', {'class': 'rg_meta'})
metadata_dicts = (json.loads(e.text) for e in img_tags)
img_tuples = ((_img_fname(d['ou']), d['ou']) for d in metadata_dicts if d['ity'] == format)
return list(itertools.islice(img_tuples, n_images)) |
def set_default (feature, value):
""" Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
"""
f = __all_features[feature]
bad_attribute = None
if f.free:
bad_attribute = "free"
elif f.optional:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))
if value not in f.values:
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)
f.set_default(value) | Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign | Below is the the instruction that describes the task:
### Input:
Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
### Response:
def set_default (feature, value):
""" Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
"""
f = __all_features[feature]
bad_attribute = None
if f.free:
bad_attribute = "free"
elif f.optional:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))
if value not in f.values:
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)
f.set_default(value) |
def clear_score_system(self):
"""Clears the score system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_score_system_metadata().is_read_only() or
self.get_score_system_metadata().is_required()):
raise errors.NoAccess()
self._my_map['scoreSystemId'] = self._score_system_default | Clears the score system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Clears the score system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def clear_score_system(self):
"""Clears the score system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_score_system_metadata().is_read_only() or
self.get_score_system_metadata().is_required()):
raise errors.NoAccess()
self._my_map['scoreSystemId'] = self._score_system_default |
async def disconnect(self):
"""Shut down the watcher task and close websockets.
"""
if not self._watch_stopped.is_set():
log.debug('Stopping watcher task')
self._watch_stopping.set()
await self._watch_stopped.wait()
self._watch_stopping.clear()
if self.is_connected():
log.debug('Closing model connection')
await self._connector.disconnect()
self._info = None | Shut down the watcher task and close websockets. | Below is the the instruction that describes the task:
### Input:
Shut down the watcher task and close websockets.
### Response:
async def disconnect(self):
"""Shut down the watcher task and close websockets.
"""
if not self._watch_stopped.is_set():
log.debug('Stopping watcher task')
self._watch_stopping.set()
await self._watch_stopped.wait()
self._watch_stopping.clear()
if self.is_connected():
log.debug('Closing model connection')
await self._connector.disconnect()
self._info = None |
def image(self):
"""
Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped.
"""
obj = self.content_object
# First, try to get from a get_image() helper method
try:
image = obj.get_image()
except AttributeError:
try:
image = obj.content_object.get_image()
except:
image = None
# if we didn't find one, try to get it from foo.image
# This allows get_image to take precedence for greater control.
if not image:
try:
image = obj.image
except AttributeError:
try:
image = obj.content_object.image
except:
return None
# Finally, ensure we're getting an image, not an image object
# with caption and byline and other things.
try:
return image.image
except AttributeError:
return image | Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped. | Below is the the instruction that describes the task:
### Input:
Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped.
### Response:
def image(self):
"""
Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped.
"""
obj = self.content_object
# First, try to get from a get_image() helper method
try:
image = obj.get_image()
except AttributeError:
try:
image = obj.content_object.get_image()
except:
image = None
# if we didn't find one, try to get it from foo.image
# This allows get_image to take precedence for greater control.
if not image:
try:
image = obj.image
except AttributeError:
try:
image = obj.content_object.image
except:
return None
# Finally, ensure we're getting an image, not an image object
# with caption and byline and other things.
try:
return image.image
except AttributeError:
return image |
def set_data(self, data):
"""Set model data"""
self._data = data
keys = list(data.keys())
self.breakpoints = []
for key in keys:
bp_list = data[key]
if bp_list:
for item in data[key]:
self.breakpoints.append((key, item[0], item[1], ""))
self.reset() | Set model data | Below is the the instruction that describes the task:
### Input:
Set model data
### Response:
def set_data(self, data):
"""Set model data"""
self._data = data
keys = list(data.keys())
self.breakpoints = []
for key in keys:
bp_list = data[key]
if bp_list:
for item in data[key]:
self.breakpoints.append((key, item[0], item[1], ""))
self.reset() |
def build(self, signing_private_key):
"""
Validates the certificate information, constructs an X.509 certificate
and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request
"""
is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)
if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
signing_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''',
_type_name(signing_private_key)
))
signature_algo = signing_private_key.algorithm
if signature_algo == 'ec':
signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
def _make_extension(name, value):
return {
'extn_id': name,
'critical': self._determine_critical(name),
'extn_value': value
}
extensions = []
for name in sorted(self._special_extensions):
value = getattr(self, '_%s' % name)
if value is not None:
extensions.append(_make_extension(name, value))
for name in sorted(self._other_extensions.keys()):
extensions.append(_make_extension(name, self._other_extensions[name]))
attributes = []
if extensions:
attributes.append({
'type': 'extension_request',
'values': [extensions]
})
certification_request_info = csr.CertificationRequestInfo({
'version': 'v1',
'subject': self._subject,
'subject_pk_info': self._subject_public_key,
'attributes': attributes
})
if signing_private_key.algorithm == 'rsa':
sign_func = asymmetric.rsa_pkcs1v15_sign
elif signing_private_key.algorithm == 'dsa':
sign_func = asymmetric.dsa_sign
elif signing_private_key.algorithm == 'ec':
sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
signing_private_key = asymmetric.load_private_key(signing_private_key)
signature = sign_func(signing_private_key, certification_request_info.dump(), self._hash_algo)
return csr.CertificationRequest({
'certification_request_info': certification_request_info,
'signature_algorithm': {
'algorithm': signature_algorithm_id,
},
'signature': signature
}) | Validates the certificate information, constructs an X.509 certificate
and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request | Below is the the instruction that describes the task:
### Input:
Validates the certificate information, constructs an X.509 certificate
and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request
### Response:
def build(self, signing_private_key):
"""
Validates the certificate information, constructs an X.509 certificate
and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request
"""
is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)
if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
signing_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''',
_type_name(signing_private_key)
))
signature_algo = signing_private_key.algorithm
if signature_algo == 'ec':
signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
def _make_extension(name, value):
return {
'extn_id': name,
'critical': self._determine_critical(name),
'extn_value': value
}
extensions = []
for name in sorted(self._special_extensions):
value = getattr(self, '_%s' % name)
if value is not None:
extensions.append(_make_extension(name, value))
for name in sorted(self._other_extensions.keys()):
extensions.append(_make_extension(name, self._other_extensions[name]))
attributes = []
if extensions:
attributes.append({
'type': 'extension_request',
'values': [extensions]
})
certification_request_info = csr.CertificationRequestInfo({
'version': 'v1',
'subject': self._subject,
'subject_pk_info': self._subject_public_key,
'attributes': attributes
})
if signing_private_key.algorithm == 'rsa':
sign_func = asymmetric.rsa_pkcs1v15_sign
elif signing_private_key.algorithm == 'dsa':
sign_func = asymmetric.dsa_sign
elif signing_private_key.algorithm == 'ec':
sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
signing_private_key = asymmetric.load_private_key(signing_private_key)
signature = sign_func(signing_private_key, certification_request_info.dump(), self._hash_algo)
return csr.CertificationRequest({
'certification_request_info': certification_request_info,
'signature_algorithm': {
'algorithm': signature_algorithm_id,
},
'signature': signature
}) |
def conditional_write(strm, fmt, value, *args, **kwargs):
"""Write to stream using fmt and value if value is not None"""
if value is not None:
strm.write(fmt.format(value, *args, **kwargs)) | Write to stream using fmt and value if value is not None | Below is the the instruction that describes the task:
### Input:
Write to stream using fmt and value if value is not None
### Response:
def conditional_write(strm, fmt, value, *args, **kwargs):
"""Write to stream using fmt and value if value is not None"""
if value is not None:
strm.write(fmt.format(value, *args, **kwargs)) |
def update_machine_group(self, project_name, group_detail):
""" update machine group in a project
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: UpdateMachineGroupResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/machinegroups/" + group_detail.group_name
headers['Content-Type'] = 'application/json'
body = six.b(json.dumps(group_detail.to_json()))
headers['x-log-bodyrawsize'] = str(len(body))
(resp, headers) = self._send("PUT", project_name, body, resource, params, headers)
return UpdateMachineGroupResponse(headers, resp) | update machine group in a project
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: UpdateMachineGroupResponse
:raise: LogException | Below is the the instruction that describes the task:
### Input:
update machine group in a project
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: UpdateMachineGroupResponse
:raise: LogException
### Response:
def update_machine_group(self, project_name, group_detail):
""" update machine group in a project
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: UpdateMachineGroupResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/machinegroups/" + group_detail.group_name
headers['Content-Type'] = 'application/json'
body = six.b(json.dumps(group_detail.to_json()))
headers['x-log-bodyrawsize'] = str(len(body))
(resp, headers) = self._send("PUT", project_name, body, resource, params, headers)
return UpdateMachineGroupResponse(headers, resp) |
def addFileHandler(self,filename='', dr='',lvl=1):
"""
This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1.
"""
fname = self.name
if filename != '':
fname = filename
if '.' not in fname:
fname+='.log'
fh = logging.FileHandler(os.path.join(dr,fname))
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
fFrmt = logging.Formatter(frmtString)
fh.setFormatter(fFrmt)
self.addHandler(fh) | This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1. | Below is the the instruction that describes the task:
### Input:
This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1.
### Response:
def addFileHandler(self,filename='', dr='',lvl=1):
"""
This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1.
"""
fname = self.name
if filename != '':
fname = filename
if '.' not in fname:
fname+='.log'
fh = logging.FileHandler(os.path.join(dr,fname))
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
fFrmt = logging.Formatter(frmtString)
fh.setFormatter(fFrmt)
self.addHandler(fh) |
def _dispatch_commands(self, from_state, to_state, smtp_command):
"""This method dispatches a SMTP command to the appropriate handler
method. It is called after a new command was received and a valid
transition was found."""
#print from_state, ' -> ', to_state, ':', smtp_command
name_handler_method = 'smtp_%s' % smtp_command.lower().replace(' ', '_')
try:
handler_method = getattr(self, name_handler_method)
except AttributeError:
# base_msg = 'No handler for %s though transition is defined (no method %s)'
# print base_msg % (smtp_command, name_handler_method)
self.reply(451, 'Temporary Local Problem: Please come back later')
else:
# Don't catch InvalidDataError here - else the state would be moved
# forward. Instead the handle_input will catch it and send out the
# appropriate reply.
handler_method() | This method dispatches a SMTP command to the appropriate handler
method. It is called after a new command was received and a valid
transition was found. | Below is the the instruction that describes the task:
### Input:
This method dispatches a SMTP command to the appropriate handler
method. It is called after a new command was received and a valid
transition was found.
### Response:
def _dispatch_commands(self, from_state, to_state, smtp_command):
"""This method dispatches a SMTP command to the appropriate handler
method. It is called after a new command was received and a valid
transition was found."""
#print from_state, ' -> ', to_state, ':', smtp_command
name_handler_method = 'smtp_%s' % smtp_command.lower().replace(' ', '_')
try:
handler_method = getattr(self, name_handler_method)
except AttributeError:
# base_msg = 'No handler for %s though transition is defined (no method %s)'
# print base_msg % (smtp_command, name_handler_method)
self.reply(451, 'Temporary Local Problem: Please come back later')
else:
# Don't catch InvalidDataError here - else the state would be moved
# forward. Instead the handle_input will catch it and send out the
# appropriate reply.
handler_method() |
def download_from_plugin(plugin: APlugin):
"""
Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin
"""
# get last update date
plugin.log.info('Get last update')
plugin.update_last_update()
# load old save state
save_state = plugin.load_save_state()
if plugin.last_update <= save_state.last_update:
plugin.log.info('No update. Nothing to do.')
return
# get download links
plugin.log.info('Get download links')
plugin.update_download_links()
# compare with save state
down_link_item_dict = plugin.get_updated_data(save_state.link_item_dict)
plugin.log.info('Compared with save state: ' + str(len(plugin.download_data)))
if not down_link_item_dict:
plugin.log.info('No new data. Nothing to do.')
return
# download new/updated data
plugin.log.info(f"Download new {plugin.unit}s: {len(down_link_item_dict)}")
plugin.download(down_link_item_dict, plugin.download_path, 'Download new ' + plugin.unit + 's', plugin.unit)
# check which downloads are succeeded
succeed_link_item_dict, lost_link_item_dict = plugin.check_download(down_link_item_dict, plugin.download_path)
plugin.log.info(f"Downloaded: {len(succeed_link_item_dict)}/{len(down_link_item_dict)}")
# update savestate link_item_dict with succeeded downloads dict
plugin.log.info('Update savestate')
plugin.update_dict(save_state.link_item_dict, succeed_link_item_dict)
# write new savestate
plugin.log.info('Write savestate')
plugin.save_save_state(save_state.link_item_dict) | Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin | Below is the the instruction that describes the task:
### Input:
Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin
### Response:
def download_from_plugin(plugin: APlugin):
"""
Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin
"""
# get last update date
plugin.log.info('Get last update')
plugin.update_last_update()
# load old save state
save_state = plugin.load_save_state()
if plugin.last_update <= save_state.last_update:
plugin.log.info('No update. Nothing to do.')
return
# get download links
plugin.log.info('Get download links')
plugin.update_download_links()
# compare with save state
down_link_item_dict = plugin.get_updated_data(save_state.link_item_dict)
plugin.log.info('Compared with save state: ' + str(len(plugin.download_data)))
if not down_link_item_dict:
plugin.log.info('No new data. Nothing to do.')
return
# download new/updated data
plugin.log.info(f"Download new {plugin.unit}s: {len(down_link_item_dict)}")
plugin.download(down_link_item_dict, plugin.download_path, 'Download new ' + plugin.unit + 's', plugin.unit)
# check which downloads are succeeded
succeed_link_item_dict, lost_link_item_dict = plugin.check_download(down_link_item_dict, plugin.download_path)
plugin.log.info(f"Downloaded: {len(succeed_link_item_dict)}/{len(down_link_item_dict)}")
# update savestate link_item_dict with succeeded downloads dict
plugin.log.info('Update savestate')
plugin.update_dict(save_state.link_item_dict, succeed_link_item_dict)
# write new savestate
plugin.log.info('Write savestate')
plugin.save_save_state(save_state.link_item_dict) |
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags) | Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True) | Below is the the instruction that describes the task:
### Input:
Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)
### Response:
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags) |
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
s = str(self.allval())
return self.parse(s[:2]+ ''.join(['Z']*len(s[2:]))) | Convert all of the values to their max values. This form is used to represent the summary level | Below is the the instruction that describes the task:
### Input:
Convert all of the values to their max values. This form is used to represent the summary level
### Response:
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
s = str(self.allval())
return self.parse(s[:2]+ ''.join(['Z']*len(s[2:]))) |