code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
---|---
def create(self, ip_access_control_list_sid):
"""
Create a new IpAccessControlListMappingInstance
:param unicode ip_access_control_list_sid: The unique id of the IP access control list to map to the SIP domain
:returns: Newly created IpAccessControlListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.ip_access_control_list_mapping.IpAccessControlListMappingInstance
"""
data = values.of({'IpAccessControlListSid': ip_access_control_list_sid, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return IpAccessControlListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
) | Create a new IpAccessControlListMappingInstance
:param unicode ip_access_control_list_sid: The unique id of the IP access control list to map to the SIP domain
:returns: Newly created IpAccessControlListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.ip_access_control_list_mapping.IpAccessControlListMappingInstance |
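A hypothetical usage sketch of the mapping list this method lives on, assuming the standard twilio Python client and placeholder account/domain/list SIDs; the attribute path is inferred from the module path above rather than confirmed by the source:
from twilio.rest import Client

# Placeholder credentials and SIDs (hypothetical values).
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
mapping = client.sip \
    .domains("SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .ip_access_control_list_mappings \
    .create(ip_access_control_list_sid="ALXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
print(mapping.sid)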
def search(self, entity_type, property_name, search_string, start_index=0, max_results=99999):
"""Performs a user search using the Crowd search API.
https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource
Args:
entity_type: 'user' or 'group'
property_name: eg. 'email', 'name'
search_string: the string to search for.
start_index: starting index of the results (default: 0)
max_results: maximum number of results returned (default: 99999)
Returns:
json results:
Returns search results.
"""
# The property restriction itself is sent as the XML payload built below;
# the query string only carries the entity type and paging parameters.
params = {
'entity-type': entity_type,
'expand': entity_type,
'start-index': start_index,
'max-results': max_results
}
# Construct XML payload of the form:
# <property-search-restriction>
# <property>
# <name>email</name>
# <type>STRING</type>
# </property>
# <match-mode>CONTAINS</match-mode>
# <value>[email protected]</value>
# </property-search-restriction>
root = etree.Element('property-search-restriction')
property_ = etree.Element('property')
prop_name = etree.Element('name')
prop_name.text = property_name
property_.append(prop_name)
prop_type = etree.Element('type')
prop_type.text = 'STRING'
property_.append(prop_type)
root.append(property_)
match_mode = etree.Element('match-mode')
match_mode.text = 'CONTAINS'
root.append(match_mode)
value = etree.Element('value')
value.text = search_string
root.append(value)
# Construct the XML payload expected by search API
payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + etree.tostring(root).decode('utf-8')
# We're sending XML but would like a JSON response
session = self._build_session(content_type='xml')
session.headers.update({'Accept': 'application/json'})
response = session.post(self.rest_url + "/search", params=params, data=payload, timeout=self.timeout)
if not response.ok:
return None
return response.json() | Performs a user search using the Crowd search API.
https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource
Args:
entity_type: 'user' or 'group'
property_name: eg. 'email', 'name'
search_string: the string to search for.
start_index: starting index of the results (default: 0)
max_results: maximum number of results returned (default: 99999)
Returns:
json results:
Returns search results. |
def get_redirect_target():
"""Get URL to redirect to and ensure that it is local."""
for target in request.values.get('next'), request.referrer:
if target and is_local_url(target):
return target | Get URL to redirect to and ensure that it is local. |
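The safety of this redirect hinges on the is_local_url check; below is a minimal self-contained sketch of such a check (hypothetical, not the project's own implementation), which accepts only http(s) targets resolving to the same host:
from urllib.parse import urljoin, urlparse

def is_local_url(target, host_url="http://example.com/"):
    """Accept only http(s) targets that resolve to the same host."""
    ref = urlparse(host_url)
    test = urlparse(urljoin(host_url, target))
    return test.scheme in ("http", "https") and ref.netloc == test.netloc

print(is_local_url("/dashboard"))                      # True
print(is_local_url("https://evil.example.org/login"))  # False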
def filename(self):
"""Defines the name of the configuration file to use."""
# Needs to be done this way to be used by the project config.
# To fix on a later PR
self._filename = getattr(self, '_filename', None)
self._root_path = getattr(self, '_root_path', None)
if self._filename is None and self._root_path is None:
return self._filename_global()
else:
return self._filename_projects() | Defines the name of the configuration file to use. |
def _find_zone_by_id(self, zone_id):
"""Return zone by id."""
if not self.zones:
return None
zone = list(filter(
lambda zone: zone.id == zone_id, self.zones))
return zone[0] if zone else None | Return zone by id. |
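The same lookup can be expressed with next() over a generator, which stops at the first match instead of filtering the whole list; a small self-contained sketch with a stand-in Zone type:
from collections import namedtuple

Zone = namedtuple("Zone", ["id", "name"])
zones = [Zone(1, "Living room"), Zone(2, "Bedroom")]

def find_zone_by_id(zones, zone_id):
    # next() returns the first matching zone, or None if nothing matches.
    return next((z for z in zones if z.id == zone_id), None)

print(find_zone_by_id(zones, 2))   # Zone(id=2, name='Bedroom')
print(find_zone_by_id(zones, 99))  # None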
def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits") | Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance. |
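For context, here is a self-contained sketch of what a find_one_letter_edits helper typically looks like (a hypothetical stand-in; the real module defines its own), so the two-letter version above is simply this function composed with itself:
import string

def find_one_letter_edits(word_string):
    """All strings within one edit (delete, transpose, replace, insert)."""
    letters = string.ascii_lowercase
    splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
    deletes = [l + r[1:] for l, r in splits if r]
    transposes = [l + r[1] + r[0] + r[2:] for l, r in splits if len(r) > 1]
    replaces = [l + c + r[1:] for l, r in splits if r for c in letters]
    inserts = [l + c + r for l, r in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)

print(len(find_one_letter_edits("word")))  # several hundred single-edit candidates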
def copy(self, *args, **kwargs):
"""Copy this model element and contained elements if they exist."""
for slot in self.__slots__:
attr = getattr(self, slot)
if slot[0] == '_': # convert protected attribute name to public
slot = slot[1:]
if slot not in kwargs:
kwargs[slot] = attr
result = type(self)(*args, **kwargs)
return result | Copy this model element and contained elements if they exist. |
def __on_message(self, ws, msg):
"""This function is called whenever there is a message received from the server"""
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])
# Build the subscription key
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
# This is a special result - if the subscription function of a downlink returns True,
# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
# If the above conditions are true, it means that the datapoints were from a downlink,
# and the subscriber function chooses to acknowledge them, so we reinsert them.
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys())) | This function is called whenever there is a message received from the server |
def _attach_params(self, params, **kwargs):
"""Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags)
"""
lst = params.to_list() if isinstance(params, ParameterSet) else params
for param in lst:
param._bundle = self
for k, v in kwargs.items():
# Here we'll set the attributes (_context, _qualifier, etc)
if getattr(param, '_{}'.format(k)) is None:
setattr(param, '_{}'.format(k), v)
self._params.append(param)
self._check_copy_for()
return | Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags) |
def _get_position_from_instance(self, instance, ordering):
"""
The position will be a tuple of values:
The QuerySet number inside of the QuerySetSequence.
Whatever the normal value taken from the ordering property gives.
"""
# Get the QuerySet number of the current instance.
qs_order = getattr(instance, '#')
# Strip the '#' and call the standard _get_position_from_instance.
result = super(SequenceCursorPagination, self)._get_position_from_instance(instance, ordering[1:])
# Return a tuple of these two elements.
return (qs_order, result) | The position will be a tuple of values:
The QuerySet number inside of the QuerySetSequence.
Whatever the normal value taken from the ordering property gives. |
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls | Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls) |
def GetCpuReservationMHz(self):
'''Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14. |
def copy(self):
"""Return a copy of this actor with the same attribute values."""
health = self.health, self.health_max
r = self.r, self.r_max
g = self.g, self.g_max
b = self.b, self.b_max
y = self.y, self.y_max
x = self.x, self.x_max
m = self.m, self.m_max
h = self.h, self.h_max
c = self.c, self.c_max
return self.__class__(self.name, health, r, g, b, y, x, m, h, c) | Return a copy of this actor with the same attribute values. |
def _to_http_hosts(hosts: Union[Iterable[str], str]) -> List[str]:
"""Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200']
"""
if isinstance(hosts, str):
hosts = hosts.replace(',', ' ').split()
return [_to_http_uri(i) for i in hosts] | Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200'] |
def upload(self, array, fields=None, table="MyDB", configfile=None):
"""
Upload an array to a personal database using SOAP POST protocol.
http://skyserver.sdss3.org/casjobs/services/jobs.asmx?op=UploadData
"""
wsid=''
password=''
if configfile is None:
configfile = "CasJobs.config"
logger.info("Reading config file: %s"%configfile)
lines = open(configfile,'r').readlines()
for line in lines:
k,v = line.strip().split('=')
if k == 'wsid': wsid = v
if k == 'password': password = v
logger.info("Attempting to drop table: %s"%table)
self.drop(table)
SOAP_TEMPLATE = """
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<UploadData xmlns="http://Services.Cas.jhu.edu">
<wsid>%s</wsid>
<pw>%s</pw>
<tableName>%s</tableName>
<data>%s</data>
<tableExists>%s</tableExists>
</UploadData>
</soap12:Body>
</soap12:Envelope>
"""
logger.info("Writing array...")
s = io.StringIO()
np.savetxt(s,array,delimiter=',',fmt="%.10g")
tb_data = ''
if fields is not None:
tb_data += ','.join(f for f in fields)+'\n'
tb_data += s.getvalue()
message = SOAP_TEMPLATE % (wsid, password, table, tb_data, "false")
#construct and send the header
webservice = httpcl.HTTP("skyserver.sdss3.org")
webservice.putrequest("POST", "/casjobs/services/jobs.asmx")
webservice.putheader("Host", "skyserver.sdss3.org")
webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
webservice.putheader("Content-length", "%d" % len(message))
webservice.endheaders()
logger.info("Sending SOAP POST message...")
webservice.send(message)
# get the response
statuscode, statusmessage, header = webservice.getreply()
print("Response: ", statuscode, statusmessage)
print("headers: ", header)
res = webservice.getfile().read()
print(res) | Upload an array to a personal database using SOAP POST protocol.
http://skyserver.sdss3.org/casjobs/services/jobs.asmx?op=UploadData |
def local_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size,
shard_files, output_format):
"""See batch_predict"""
# from . import predict as predict_module
from .prediction import predict as predict_module
if mode == 'evaluation':
model_dir = os.path.join(training_dir, 'evaluation_model')
elif mode == 'prediction':
model_dir = os.path.join(training_dir, 'model')
else:
raise ValueError('mode must be evaluation or prediction')
if not file_io.file_exists(model_dir):
raise ValueError('Model folder %s does not exist' % model_dir)
cmd = ['predict.py',
'--predict-data=%s' % prediction_input_file,
'--trained-model-dir=%s' % model_dir,
'--output-dir=%s' % output_dir,
'--output-format=%s' % output_format,
'--batch-size=%s' % str(batch_size),
'--shard-files' if shard_files else '--no-shard-files',
'--has-target' if mode == 'evaluation' else '--no-has-target'
]
# return predict_module.predict.main(cmd)
return predict_module.main(cmd) | See batch_predict |
def _handle_final_metric_data(self, data):
"""Call tuner to process final results
"""
id_ = data['parameter_id']
value = data['value']
if id_ in _customized_parameter_ids:
self.tuner.receive_customized_trial_result(id_, _trial_params[id_], value)
else:
self.tuner.receive_trial_result(id_, _trial_params[id_], value) | Call tuner to process final results |
def append(self, header, f, _left=False):
"""Add a column to the table.
Args:
header (str):
Column header
f (function(datum)->str):
Makes the row string from the datum. Str returned by f should
have the same width as header.
"""
self.items_length += len(header)
if _left:
self.deque.appendleft((header, f))
else:
self.deque.append((header, f)) | Add a column to the table.
Args:
header (str):
Column header
f (function(datum)->str):
Makes the row string from the datum. Str returned by f should
have the same width as header. |
def SpiceUDREPU(f):
"""
Decorator for wrapping python functions in spice udrepu callback type
:param f: function to be wrapped
:type f: builtins.function
:return: wrapped udrepu function
:rtype: builtins.function
"""
@functools.wraps(f)
def wrapping_udrepu(beg, end, et):
f(beg, end, et)
return UDREPU(wrapping_udrepu) | Decorator for wrapping python functions in spice udrepu callback type
:param f: function to be wrapped
:type f: builtins.function
:return: wrapped udrepu function
:rtype: builtins.function |
def plot_slippage_sensitivity(returns, positions, transactions,
ax=None, **kwargs):
"""
Plots curve relating per-dollar slippage to average annual returns.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
avg_returns_given_slippage = pd.Series()
for bps in range(1, 100):
adj_returns = txn.adjust_returns_for_slippage(returns, positions,
transactions, bps)
avg_returns = ep.annual_return(adj_returns)
avg_returns_given_slippage.loc[bps] = avg_returns
avg_returns_given_slippage.plot(alpha=1.0, lw=2, ax=ax)
ax.set_title('Average annual returns given additional per-dollar slippage')
ax.set_xticks(np.arange(0, 100, 10))
ax.set_ylabel('Average annual return')
ax.set_xlabel('Per-dollar slippage (bps)')
return ax | Plots curve relating per-dollar slippage to average annual returns.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on. |
def validate(self):
"""
Ensure `self.path` has one of the extensions in `self.allowed_formats`.
"""
assert self.path, "{} must have a path".format(self.__class__.__name__)
ext = extract_path_ext(self.path, default_ext=self.subtitlesformat)
if ext not in self.allowed_formats and ext not in CONVERTIBLE_FORMATS[self.get_preset()]:
raise ValueError('Incompatible extension {} for SubtitleFile at {}'.format(ext, self.path)) | Ensure `self.path` has one of the extensions in `self.allowed_formats`. |
def dictionary(self) -> dict:
"""Get a python dictionary of contents."""
self.config.read(self.filepath)
return self.config._sections | Get a python dictionary of contents. |
def download_song_by_id(self, song_id, song_name, folder='.'):
"""Download a song by id and save it to disk.
:params song_id: song id.
:params song_name: song name.
:params folder: storage path.
"""
try:
url = self.crawler.get_song_url(song_id)
if self.lyric:
# use old api
lyric_info = self.crawler.get_song_lyric(song_id)
else:
lyric_info = None
song_name = song_name.replace('/', '')
song_name = song_name.replace('.', '')
self.crawler.get_song_by_url(url, song_name, folder, lyric_info)
except RequestException as exception:
click.echo(exception) | Download a song by id and save it to disk.
:params song_id: song id.
:params song_name: song name.
:params folder: storage path. |
def licenses_configured(name, licenses=None):
'''
Configures licenses on the cluster entity
Checks if each license exists on the server:
- if it doesn't, it creates it
Check if license is assigned to the cluster:
- if it's not assigned to the cluster:
- assign it to the cluster if there is space
- error if there's no space
- if it's assigned to the cluster nothing needs to be done
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': 'Default'}
if not licenses:
raise salt.exceptions.ArgumentValueError('No licenses provided')
cluster_name, datacenter_name = \
__salt__['esxcluster.get_details']()['cluster'], \
__salt__['esxcluster.get_details']()['datacenter']
display_name = '{0}/{1}'.format(datacenter_name, cluster_name)
log.info('Running licenses configured for \'%s\'', display_name)
log.trace('licenses = %s', licenses)
entity = {'type': 'cluster',
'datacenter': datacenter_name,
'cluster': cluster_name}
log.trace('entity = %s', entity)
comments = []
changes = {}
old_licenses = []
new_licenses = []
has_errors = False
needs_changes = False
try:
# Validate licenses
log.trace('Validating licenses')
schema = LicenseSchema.serialize()
try:
jsonschema.validate({'licenses': licenses}, schema)
except jsonschema.exceptions.ValidationError as exc:
raise salt.exceptions.InvalidLicenseError(exc)
si = __salt__['vsphere.get_service_instance_via_proxy']()
# Retrieve licenses
existing_licenses = __salt__['vsphere.list_licenses'](
service_instance=si)
remaining_licenses = existing_licenses[:]
# Cycle through licenses
for license_name, license in licenses.items():
# Check if license already exists
filtered_licenses = [l for l in existing_licenses
if l['key'] == license]
# TODO Update license description - not of interest right now
if not filtered_licenses:
# License doesn't exist - add and assign to cluster
needs_changes = True
if __opts__['test']:
# If it doesn't exist it clearly needs to be assigned as
# well so we can stop the check here
comments.append('State {0} will add license \'{1}\', '
'and assign it to cluster \'{2}\'.'
''.format(name, license_name, display_name))
log.info(comments[-1])
continue
else:
try:
existing_license = __salt__['vsphere.add_license'](
key=license, description=license_name,
service_instance=si)
except salt.exceptions.VMwareApiError as ex:
comments.append(ex.err_msg)
log.error(comments[-1])
has_errors = True
continue
comments.append('Added license \'{0}\'.'
''.format(license_name))
log.info(comments[-1])
else:
# License exists let's check if it's assigned to the cluster
comments.append('License \'{0}\' already exists. '
'Nothing to be done.'.format(license_name))
log.info(comments[-1])
existing_license = filtered_licenses[0]
log.trace('Checking licensed entities...')
assigned_licenses = __salt__['vsphere.list_assigned_licenses'](
entity=entity,
entity_display_name=display_name,
service_instance=si)
# Checking if any of the licenses already assigned have the same
# name as the new license; the already assigned license would be
# replaced by the new license
#
# Licenses with different names but matching features would be
# replaced as well, but searching for those would be very complex
#
# the name check is good enough for now
already_assigned_license = assigned_licenses[0] if \
assigned_licenses else None
if already_assigned_license and \
already_assigned_license['key'] == license:
# License is already assigned to entity
comments.append('License \'{0}\' already assigned to '
'cluster \'{1}\'. Nothing to be done.'
''.format(license_name, display_name))
log.info(comments[-1])
continue
needs_changes = True
# License needs to be assigned to entity
if existing_license['capacity'] <= existing_license['used']:
# License is already fully used
comments.append('Cannot assign license \'{0}\' to cluster '
'\'{1}\'. No free capacity available.'
''.format(license_name, display_name))
log.error(comments[-1])
has_errors = True
continue
# Assign license
if __opts__['test']:
comments.append('State {0} will assign license \'{1}\' '
'to cluster \'{2}\'.'.format(
name, license_name, display_name))
log.info(comments[-1])
else:
try:
__salt__['vsphere.assign_license'](
license_key=license,
license_name=license_name,
entity=entity,
entity_display_name=display_name,
service_instance=si)
except salt.exceptions.VMwareApiError as ex:
comments.append(ex.err_msg)
log.error(comments[-1])
has_errors = True
continue
comments.append('Assigned license \'{0}\' to cluster \'{1}\'.'
''.format(license_name, display_name))
log.info(comments[-1])
# Note: Because the already_assigned_license was retrieved
# from the assignment license manager it doesn't have a used
# value - that's a limitation from VMware. The license would
# need to be retrieved again from the license manager to get
# the value
# Hide license keys
assigned_license = __salt__['vsphere.list_assigned_licenses'](
entity=entity,
entity_display_name=display_name,
service_instance=si)[0]
assigned_license['key'] = '<hidden>'
if already_assigned_license:
already_assigned_license['key'] = '<hidden>'
if already_assigned_license and \
already_assigned_license['capacity'] == sys.maxsize:
already_assigned_license['capacity'] = 'Unlimited'
changes[license_name] = {'new': assigned_license,
'old': already_assigned_license}
continue
__salt__['vsphere.disconnect'](si)
ret.update({'result': True if (not needs_changes) else None if
__opts__['test'] else False if has_errors else True,
'comment': '\n'.join(comments),
'changes': changes if not __opts__['test'] else {}})
return ret
except salt.exceptions.CommandExecutionError as exc:
log.exception('Encountered error')
if si:
__salt__['vsphere.disconnect'](si)
ret.update({
'result': False,
'comment': exc.strerror})
return ret | Configures licenses on the cluster entity
Checks if each license exists on the server:
- if it doesn't, it creates it
Check if license is assigned to the cluster:
- if it's not assigned to the cluster:
- assign it to the cluster if there is space
- error if there's no space
- if it's assigned to the cluster nothing needs to be done |
def _execute_wk(*args, input=None):
"""
Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr
"""
wk_args = (WK_PATH,) + args
return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE) | Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr |
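A hedged usage sketch, assuming WK_PATH resolves to a working wkhtmltopdf binary and using '-' for stdin input and stdout output as the wkhtmltopdf CLI allows:
# Render an HTML snippet from stdin to PDF bytes on stdout (hypothetical call).
result = _execute_wk('--quiet', '-', '-', input=b'<h1>Hello, PDF</h1>')
if result.returncode == 0:
    with open('hello.pdf', 'wb') as fh:
        fh.write(result.stdout)
else:
    print(result.stderr.decode())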
def path(self):
"""Return path
:returns: path
:rtype: str
:raises: None
"""
p = os.path.normpath(self._path)
if p.endswith(':'):
p = p + os.path.sep
return p | Return path
:returns: path
:rtype: str
:raises: None |
def ssh_reachable(self, tries=None, propagate_fail=True):
"""
Check if the VM is reachable with ssh
Args:
tries(int): Number of tries to try connecting to the host
propagate_fail(bool): If set to true, this event will appear
in the log and fail the outer stage. Otherwise, it will be
discarded.
Returns:
bool: True if the VM is reachable.
"""
if not self.running():
return False
try:
ssh.get_ssh_client(
ip_addr=self.ip(),
host_name=self.name(),
ssh_tries=tries,
propagate_fail=propagate_fail,
ssh_key=self.virt_env.prefix.paths.ssh_id_rsa(),
username=self._spec.get('ssh-user'),
password=self._spec.get('ssh-password'),
)
except ssh.LagoSSHTimeoutException:
return False
return True | Check if the VM is reachable with ssh
Args:
tries(int): Number of tries to try connecting to the host
propagate_fail(bool): If set to true, this event will appear
in the log and fail the outer stage. Otherwise, it will be
discarded.
Returns:
bool: True if the VM is reachable. |
def clear(self):
"""
Cleans up the manager. The manager can't be used after this method has
been called
"""
# Cancel timer
self.__cancel_timer()
self.__timer = None
self.__timer_args = None
self.__still_valid = False
self._value = None
super(TemporalDependency, self).clear() | Cleans up the manager. The manager can't be used after this method has
been called |
def next_k_array(a):
"""
Given an array `a` of k distinct nonnegative integers, sorted in
ascending order, return the next k-array in the lexicographic
ordering of the descending sequences of the elements [1]_. `a` is
modified in place.
Parameters
----------
a : ndarray(int, ndim=1)
Array of length k.
Returns
-------
a : ndarray(int, ndim=1)
View of `a`.
Examples
--------
Enumerate all the subsets with k elements of the set {0, ..., n-1}.
>>> n, k = 4, 2
>>> a = np.arange(k)
>>> while a[-1] < n:
... print(a)
... a = next_k_array(a)
...
[0 1]
[0 2]
[1 2]
[0 3]
[1 3]
[2 3]
References
----------
.. [1] `Combinatorial number system
<https://en.wikipedia.org/wiki/Combinatorial_number_system>`_,
Wikipedia.
"""
# Logic taken from Algorithm T in D. Knuth, The Art of Computer
# Programming, Section 7.2.1.3 "Generating All Combinations".
k = len(a)
if k == 1 or a[0] + 1 < a[1]:
a[0] += 1
return a
a[0] = 0
i = 1
x = a[i] + 1
while i < k-1 and x == a[i+1]:
i += 1
a[i-1] = i - 1
x = a[i] + 1
a[i] = x
return a | Given an array `a` of k distinct nonnegative integers, sorted in
ascending order, return the next k-array in the lexicographic
ordering of the descending sequences of the elements [1]_. `a` is
modified in place.
Parameters
----------
a : ndarray(int, ndim=1)
Array of length k.
Returns
-------
a : ndarray(int, ndim=1)
View of `a`.
Examples
--------
Enumerate all the subsets with k elements of the set {0, ..., n-1}.
>>> n, k = 4, 2
>>> a = np.arange(k)
>>> while a[-1] < n:
... print(a)
... a = next_k_array(a)
...
[0 1]
[0 2]
[1 2]
[0 3]
[1 3]
[2 3]
References
----------
.. [1] `Combinatorial number system
<https://en.wikipedia.org/wiki/Combinatorial_number_system>`_,
Wikipedia. |
def autocorrelate(data, unbias=2, normalize=2):
"""
Compute the autocorrelation coefficients for time series data.
Here we use scipy.signal.correlate, but the results are the same as in
Yang, et al., 2012 for unbias=1:
"The autocorrelation coefficient refers to the correlation of a time
series with its own past or future values. iGAIT uses unbiased
autocorrelation coefficients of acceleration data to scale the regularity
and symmetry of gait.
The autocorrelation coefficients are divided by :math:`fc(0)`,
so that the autocorrelation coefficient is equal to :math:`1` when :math:`t=0`:
.. math::
NFC(t) = \\frac{fc(t)}{fc(0)}
Here :math:`NFC(t)` is the normalised autocorrelation coefficient, and :math:`fc(t)` are
autocorrelation coefficients."
:param data: time series data
:type data: numpy array
:param unbias: autocorrelation, divide by range (1) or by weighted range (2)
:type unbias: integer or None
:param normalize: divide by 1st coefficient (1) or by maximum abs. value (2)
:type normalize: integer or None
:return coefficients: autocorrelation coefficients [normalized, unbiased]
:rtype coefficients: numpy array
:return N: number of coefficients
:rtype N: integer
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import autocorrelate
>>> data = np.random.random(100)
>>> unbias = 2
>>> normalize = 2
>>> coefficients, N = autocorrelate(data, unbias, normalize)
"""
# Autocorrelation:
coefficients = correlate(data, data, 'full')
size = int(coefficients.size / 2)
coefficients = coefficients[size:]
N = coefficients.size
# Unbiased:
if unbias:
if unbias == 1:
coefficients /= (N - np.arange(N))
elif unbias == 2:
coefficient_ratio = coefficients[0]/coefficients[-1]
coefficients /= np.linspace(coefficient_ratio, 1, N)
else:
raise IOError("unbias should be set to 1, 2, or None")
# Normalize:
if normalize:
if normalize == 1:
coefficients /= np.abs(coefficients[0])
elif normalize == 2:
coefficients /= np.max(np.abs(coefficients))
else:
raise IOError("normalize should be set to 1, 2, or None")
return coefficients, N | Compute the autocorrelation coefficients for time series data.
Here we use scipy.signal.correlate, but the results are the same as in
Yang, et al., 2012 for unbias=1:
"The autocorrelation coefficient refers to the correlation of a time
series with its own past or future values. iGAIT uses unbiased
autocorrelation coefficients of acceleration data to scale the regularity
and symmetry of gait.
The autocorrelation coefficients are divided by :math:`fc(0)`,
so that the autocorrelation coefficient is equal to :math:`1` when :math:`t=0`:
.. math::
NFC(t) = \\frac{fc(t)}{fc(0)}
Here :math:`NFC(t)` is the normalised autocorrelation coefficient, and :math:`fc(t)` are
autocorrelation coefficients."
:param data: time series data
:type data: numpy array
:param unbias: autocorrelation, divide by range (1) or by weighted range (2)
:type unbias: integer or None
:param normalize: divide by 1st coefficient (1) or by maximum abs. value (2)
:type normalize: integer or None
:return coefficients: autocorrelation coefficients [normalized, unbiased]
:rtype coefficients: numpy array
:return N: number of coefficients
:rtype N: integer
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import autocorrelate
>>> data = np.random.random(100)
>>> unbias = 2
>>> normalize = 2
>>> coefficients, N = autocorrelate(data, unbias, normalize) |
def chord(ref, est, **kwargs):
r'''Chord evaluation
Parameters
----------
ref : jams.Annotation
Reference annotation object
est : jams.Annotation
Estimated annotation object
kwargs
Additional keyword arguments
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
See Also
--------
mir_eval.chord.evaluate
Examples
--------
>>> # Load in the JAMS objects
>>> ref_jam = jams.load('reference.jams')
>>> est_jam = jams.load('estimated.jams')
>>> # Select the first relevant annotations
>>> ref_ann = ref_jam.search(namespace='chord')[0]
>>> est_ann = est_jam.search(namespace='chord')[0]
>>> scores = jams.eval.chord(ref_ann, est_ann)
'''
namespace = 'chord'
ref = coerce_annotation(ref, namespace)
est = coerce_annotation(est, namespace)
ref_interval, ref_value = ref.to_interval_values()
est_interval, est_value = est.to_interval_values()
return mir_eval.chord.evaluate(ref_interval, ref_value,
est_interval, est_value, **kwargs) | r'''Chord evaluation
Parameters
----------
ref : jams.Annotation
Reference annotation object
est : jams.Annotation
Estimated annotation object
kwargs
Additional keyword arguments
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
See Also
--------
mir_eval.chord.evaluate
Examples
--------
>>> # Load in the JAMS objects
>>> ref_jam = jams.load('reference.jams')
>>> est_jam = jams.load('estimated.jams')
>>> # Select the first relevant annotations
>>> ref_ann = ref_jam.search(namespace='chord')[0]
>>> est_ann = est_jam.search(namespace='chord')[0]
>>> scores = jams.eval.chord(ref_ann, est_ann) |
def _attach_to_model(self, model):
"""
Check that the model can handle dynamic fields
"""
if not issubclass(model, ModelWithDynamicFieldMixin):
raise ImplementationError(
'The "%s" model does not inherit from ModelWithDynamicFieldMixin '
'so the "%s" DynamicField cannot be attached to it' % (
model.__name__, self.name))
super(DynamicFieldMixin, self)._attach_to_model(model)
if self.dynamic_version_of is not None:
return
if hasattr(model, self.name):
return
setattr(model, self.name, self) | Check that the model can handle dynamic fields |
def _revert_caffe2_pad(attr):
"""Removing extra padding from Caffe2."""
if len(attr) == 4:
attr = attr[:2]
elif len(attr) == 2:
pass
else:
raise ValueError("Invalid caffe2 type padding: {}".format(attr))
return attr | Removing extra padding from Caffe2. |
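A quick illustration of the trimming, assuming a symmetric 4-element Caffe2-style pad attribute:
print(_revert_caffe2_pad((1, 1, 1, 1)))  # (1, 1): extra trailing pads dropped
print(_revert_caffe2_pad((2, 2)))        # (2, 2): already the expected length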
def paint( self, painter, option, widget ):
"""
Paints this item on the painter.
:param painter | <QPainter>
option | <QStyleOptionGraphicsItem>
widget | <QWidget>
"""
if ( self._rebuildRequired ):
self.rebuild()
# set the coloring options
painter.setPen(self.borderColor())
if ( self.isSelected() ):
painter.setBrush(self.highlightColor())
else:
painter.setBrush(self.fillColor())
hints = painter.renderHints()
if ( not self.isAllDay() ):
painter.setRenderHint(painter.Antialiasing)
pen = painter.pen()
pen.setWidthF(0.25)
painter.setPen(pen)
painter.drawPath(self.path())
# draw the text in the different rect locations
title = self.title()
painter.setPen(self.textColor())
for data in self._textData:
painter.drawText(*data)
painter.setRenderHints(hints) | Paints this item on the painter.
:param painter | <QPainter>
option | <QStyleOptionGraphicsItem>
widget | <QWidget> |
def list_experiments(project_path,
sort=None,
output=None,
filter_op=None,
info_keys=None):
"""Lists experiments in the directory subtree.
Args:
project_path (str): Directory where experiments are located.
Corresponds to Experiment.local_dir.
sort (str): Key to sort by.
output (str): Name of file where output is saved.
filter_op (str): Filter operation in the format
"<column> <operator> <value>".
info_keys (list): Keys that are displayed.
"""
_check_tabulate()
base, experiment_folders, _ = next(os.walk(project_path))
experiment_data_collection = []
for experiment_dir in experiment_folders:
experiment_state = _get_experiment_state(
os.path.join(base, experiment_dir))
if not experiment_state:
logger.debug("No experiment state found in %s", experiment_dir)
continue
checkpoints = pd.DataFrame(experiment_state["checkpoints"])
runner_data = experiment_state["runner_data"]
# Format time-based values.
time_values = {
"start_time": runner_data.get("_start_time"),
"last_updated": experiment_state.get("timestamp"),
}
formatted_time_values = {
key: datetime.fromtimestamp(val).strftime(TIMESTAMP_FORMAT)
if val else None
for key, val in time_values.items()
}
experiment_data = {
"name": experiment_dir,
"total_trials": checkpoints.shape[0],
"running_trials": (checkpoints["status"] == Trial.RUNNING).sum(),
"terminated_trials": (
checkpoints["status"] == Trial.TERMINATED).sum(),
"error_trials": (checkpoints["status"] == Trial.ERROR).sum(),
}
experiment_data.update(formatted_time_values)
experiment_data_collection.append(experiment_data)
if not experiment_data_collection:
print("No experiments found!")
sys.exit(0)
info_df = pd.DataFrame(experiment_data_collection)
if not info_keys:
info_keys = DEFAULT_PROJECT_INFO_KEYS
col_keys = [k for k in list(info_keys) if k in info_df]
if not col_keys:
print("None of keys {} in experiment data!".format(info_keys))
sys.exit(0)
info_df = info_df[col_keys]
if filter_op:
col, op, val = filter_op.split(" ")
col_type = info_df[col].dtype
if is_numeric_dtype(col_type):
val = float(val)
elif is_string_dtype(col_type):
val = str(val)
# TODO(Andrew): add support for datetime and boolean
else:
raise ValueError("Unsupported dtype for \"{}\": {}".format(
val, col_type))
op = OPERATORS[op]
filtered_index = op(info_df[col], val)
info_df = info_df[filtered_index]
if sort:
if sort not in info_df:
raise KeyError("Sort Index \"{}\" not in: {}".format(
sort, list(info_df)))
info_df = info_df.sort_values(by=sort)
print_format_output(info_df)
if output:
file_extension = os.path.splitext(output)[1].lower()
if file_extension in (".p", ".pkl", ".pickle"):
info_df.to_pickle(output)
elif file_extension == ".csv":
info_df.to_csv(output, index=False)
else:
raise ValueError("Unsupported filetype: {}".format(output))
print("Output saved at:", output) | Lists experiments in the directory subtree.
Args:
project_path (str): Directory where experiments are located.
Corresponds to Experiment.local_dir.
sort (str): Key to sort by.
output (str): Name of file where output is saved.
filter_op (str): Filter operation in the format
"<column> <operator> <value>".
info_keys (list): Keys that are displayed. |
def skip_if_empty(func):
"""
Decorator for validation functions which makes them pass if the value
passed in is the EMPTY sentinel value.
"""
@partial_safe_wraps(func)
def inner(value, *args, **kwargs):
if value is EMPTY:
return
else:
return func(value, *args, **kwargs)
return inner | Decorator for validation functions which makes them pass if the value
passed in is the EMPTY sentinel value. |
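A self-contained sketch of the pattern, with a stand-in EMPTY sentinel and functools.wraps in place of the module's partial_safe_wraps (both hypothetical stand-ins):
from functools import wraps

EMPTY = object()  # stand-in sentinel

def skip_if_empty(func):
    @wraps(func)
    def inner(value, *args, **kwargs):
        if value is EMPTY:
            return
        return func(value, *args, **kwargs)
    return inner

@skip_if_empty
def validate_positive(value):
    if value <= 0:
        raise ValueError("must be positive")

validate_positive(EMPTY)  # skipped, no error raised
validate_positive(5)      # validation runs normally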
def _get_containers(self):
"""Return available containers."""
infos = self.native_conn.list_containers_info()
return [self.cont_cls(self, i['name'], i['count'], i['bytes'])
for i in infos] | Return available containers. |
def _sort(self, short_list, sorts):
"""
TAKE SHORTLIST, RETURN IT SORTED
:param short_list:
:param sorts: LIST OF SORTS TO PERFORM
:return:
"""
sort_values = self._index_columns(sorts)
# RECURSIVE SORTING
output = []
def _sort_more(short_list, i, sorts):
if len(sorts) == 0:
output.extend(short_list)
return
sort = sorts[0]
index = self._index[sort_values[i]]
if sort.sort == 1:
sorted_keys = sorted(index.keys())
elif sort.sort == -1:
sorted_keys = reversed(sorted(index.keys()))
else:
sorted_keys = list(index.keys())
for k in sorted_keys:
# recurse with the narrowed short_list and the remaining sorts
_sort_more(index[k] & short_list, i + 1, sorts[1:])
_sort_more(short_list, 0, sorts)
return output | TAKE SHORTLIST, RETURN IT SORTED
:param short_list:
:param sorts: LIST OF SORTS TO PERFORM
:return: |
def bls_snr(blsdict,
times,
mags,
errs,
assumeserialbls=False,
magsarefluxes=False,
sigclip=10.0,
npeaks=None,
perioddeltapercent=10,
ingressdurationfraction=0.1,
verbose=True):
'''Calculates the signal to noise ratio for each best peak in the BLS
periodogram, along with transit depth, duration, and refit period and epoch.
The following equation is used for SNR::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
Parameters
----------
blsdict : dict
This is an lspinfo dict produced by either `bls_parallel_pfind` or
`bls_serial_pfind` in this module, or by your own BLS function. If you
provide results in a dict from an external BLS function, make sure this
matches the form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': list of result dicts from eebls.f wrapper functions,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitdurations,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
assumeserialbls : bool
If this is True, this function will not rerun BLS around each best peak
in the input lspinfo dict to refit the periods and epochs. This is
usually required for `bls_parallel_pfind` so set this to False if you use
results from that function. The parallel method breaks up the frequency
space into chunks for speed, and the results may not exactly match those
from a regular BLS run.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
npeaks : int or None
This controls how many of the periods in `blsdict['nbestperiods']` to
find the SNR for. If it's None, then this will calculate the SNR for all
of them. If it's an integer between 1 and
`len(blsdict['nbestperiods'])`, will calculate for only the specified
number of peak periods, starting from the best period.
perioddeltapercent : float
The fraction of the period provided to use to search around this
value. This is a percentage. The period range searched will then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'npeaks: the number of periodogram peaks requested to get SNR for,
'period': list of refit best periods for each requested peak,
'epoch': list of refit epochs (i.e. mid-transit times),
'snr':list of SNRs of the transit for each requested peak,
'transitdepth':list of depths of the transits,
'transitduration':list of durations of the transits,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'allblsmodels':the full BLS models used along with its parameters,
'allsubtractedmags':BLS models - phased light curves,
'allphasedmags':the phase light curves,
'allphases': the phase values}
'''
# figure out how many periods to work on
if (npeaks and (0 < npeaks < len(blsdict['nbestperiods']))):
nperiods = npeaks
else:
if verbose:
LOGWARNING('npeaks not specified or invalid, '
'getting SNR for all %s BLS peaks' %
len(blsdict['nbestperiods']))
nperiods = len(blsdict['nbestperiods'])
nbestperiods = blsdict['nbestperiods'][:nperiods]
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
nbestsnrs = []
transitdepth, transitduration = [], []
nphasebins, transingressbin, transegressbin = [], [], []
# keep these around for diagnostics
allsubtractedmags = []
allphasedmags = []
allphases = []
allblsmodels = []
# these are refit periods and epochs
refitperiods = []
refitepochs = []
for period in nbestperiods:
# get the period interval
startp = period - perioddeltapercent*period/100.0
if startp < 0:
startp = period
endp = period + perioddeltapercent*period/100.0
# see if we need to rerun bls_serial_pfind
if not assumeserialbls:
# run bls_serial_pfind with the kwargs copied over from the
# initial run. replace only the startp, endp, verbose, sigclip
# kwarg values
prevkwargs = blsdict['kwargs'].copy()
prevkwargs['verbose'] = verbose
prevkwargs['startp'] = startp
prevkwargs['endp'] = endp
prevkwargs['sigclip'] = None
blsres = bls_serial_pfind(stimes,
smags,
serrs,
**prevkwargs)
else:
blsres = blsdict
thistransdepth = blsres['blsresult']['transdepth']
thistransduration = blsres['blsresult']['transduration']
thisbestperiod = blsres['bestperiod']
thistransingressbin = blsres['blsresult']['transingressbin']
thistransegressbin = blsres['blsresult']['transegressbin']
thisnphasebins = blsdict['kwargs']['nphasebins']
stats = _get_bls_stats(stimes,
smags,
serrs,
thistransdepth,
thistransduration,
ingressdurationfraction,
nphasebins,
thistransingressbin,
thistransegressbin,
thisbestperiod,
thisnphasebins,
magsarefluxes=magsarefluxes,
verbose=verbose)
# update the lists with results from this peak
nbestsnrs.append(stats['snr'])
transitdepth.append(stats['transitdepth'])
transitduration.append(stats['transitduration'])
transingressbin.append(stats['transingressbin'])
transegressbin.append(stats['transegressbin'])
nphasebins.append(stats['nphasebins'])
# update the refit periods and epochs
refitperiods.append(stats['period'])
refitepochs.append(stats['epoch'])
# update the diagnostics
allsubtractedmags.append(stats['subtractedmags'])
allphasedmags.append(stats['phasedmags'])
allphases.append(stats['phases'])
allblsmodels.append(stats['blsmodel'])
# done with working on each peak
# if there aren't enough points in the mag series, bail out
else:
LOGERROR('no good detections for these times and mags, skipping...')
nbestsnrs = None
transitdepth, transitduration = None, None
nphasebins, transingressbin, transegressbin = None, None, None
allsubtractedmags, allphases, allphasedmags = None, None, None
return {'npeaks':npeaks,
'period':refitperiods,
'epoch':refitepochs,
'snr':nbestsnrs,
'transitdepth':transitdepth,
'transitduration':transitduration,
'nphasebins':nphasebins,
'transingressbin':transingressbin,
'transegressbin':transegressbin,
'allblsmodels':allblsmodels,
'allsubtractedmags':allsubtractedmags,
'allphasedmags':allphasedmags,
'allphases':allphases} | Calculates the signal to noise ratio for each best peak in the BLS
periodogram, along with transit depth, duration, and refit period and epoch.
The following equation is used for SNR::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
Parameters
----------
blsdict : dict
This is an lspinfo dict produced by either `bls_parallel_pfind` or
`bls_serial_pfind` in this module, or by your own BLS function. If you
provide results in a dict from an external BLS function, make sure this
matches the form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': list of result dicts from eebls.f wrapper functions,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitdurations,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
assumeserialbls : bool
If this is True, this function will not rerun BLS around each best peak
in the input lspinfo dict to refit the periods and epochs. This is
usually required for `bls_parallel_pfind` so set this to False if you use
results from that function. The parallel method breaks up the frequency
space into chunks for speed, and the results may not exactly match those
from a regular BLS run.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
npeaks : int or None
This controls how many of the periods in `blsdict['nbestperiods']` to
find the SNR for. If it's None, then this will calculate the SNR for all
of them. If it's an integer between 1 and
`len(blsdict['nbestperiods'])`, will calculate for only the specified
number of peak periods, starting from the best period.
perioddeltapercent : float
The fraction of the period provided to use to search around this
value. This is a percentage. The period range searched will then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'npeaks: the number of periodogram peaks requested to get SNR for,
'period': list of refit best periods for each requested peak,
'epoch': list of refit epochs (i.e. mid-transit times),
'snr':list of SNRs of the transit for each requested peak,
'transitdepth':list of depths of the transits,
'transitduration':list of durations of the transits,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'allblsmodels':the full BLS models used along with its parameters,
'allsubtractedmags':BLS models - phased light curves,
'allphasedmags':the phase light curves,
'allphases': the phase values} |
def setup_exchanges(app):
"""
Setup result exchange to route all tasks to platform queue.
"""
with app.producer_or_acquire() as P:
# Ensure all queues are noticed and configured with their
# appropriate exchange.
for q in app.amqp.queues.values():
P.maybe_declare(q) | Setup result exchange to route all tasks to platform queue. |
def get_base_wrappers(method='get', template_name='', predicates=(), wrappers=()):
""" basic View Wrappers used by view_config.
"""
wrappers += (preserve_view(MethodPredicate(method), *predicates),)
if template_name:
wrappers += (render_template(template_name),)
return wrappers | basic View Wrappers used by view_config. |
def connect(self):
''' instantiate objects / parse config file '''
# open config file for parsing
try:
settings = configparser.ConfigParser()
settings._interpolation = configparser.ExtendedInterpolation()
except Exception as err:
self.logger.error("Failed to instantiate config parser exception: %s" % err)
raise
try:
settings.read(self.__config__)
except Exception as err:
self.logger.error("Failed to read config file exception: %s" % err)
raise
# Connect to Symphony
symphony_p12 = settings.get('symphony', 'symphony_p12')
symphony_pwd = settings.get('symphony', 'symphony_pwd')
symphony_pod_uri = settings.get('symphony', 'symphony_pod_uri')
symphony_keymanager_uri = settings.get('symphony', 'symphony_keymanager_uri')
symphony_agent_uri = settings.get('symphony', 'symphony_agent_uri')
symphony_sessionauth_uri = settings.get('symphony', 'symphony_sessionauth_uri')
symphony_sid = settings.get('symphony', 'symphony_sid')
crypt = symphony.Crypt(symphony_p12, symphony_pwd)
symphony_crt, symphony_key = crypt.p12parse()
try:
# instantiate auth methods
auth = symphony.Auth(symphony_sessionauth_uri, symphony_keymanager_uri, symphony_crt, symphony_key)
# get session token
session_token = auth.get_session_token()
self.logger.info("AUTH ( session token ): %s" % session_token)
# get keymanager token
keymngr_token = auth.get_keymanager_token()
self.logger.info("AUTH ( key manager token ): %s" % keymngr_token)
# instantiate agent methods
agent = symphony.Agent(symphony_agent_uri, session_token, keymngr_token)
# instantiate pod methods
pod = symphony.Pod(symphony_pod_uri, session_token, keymngr_token)
self.logger.info("INSTANTIATION ( all objects successful)")
except Exception as err:
self.logger.error("Failed to authenticate and initialize: %s" % err)
raise
# return references and such
return agent, pod, symphony_sid | instantiate objects / parse config file |
def validate(self, model, checks=[]):
"""Use a defined schema to validate the given table."""
records = self.data.to_dict("records")
self.evaluate_report(
validate(records, headers=list(records[0]),
preset='table', schema=self.schema,
order_fields=True, custom_checks=checks)) | Use a defined schema to validate the given table. |
def auprc(y_true, y_pred):
"""Area under the precision-recall curve
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
return skm.average_precision_score(y_true, y_pred) | Area under the precision-recall curve |
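A usage sketch, assuming scikit-learn is installed and that _mask_value_nan drops positions where the label is NaN:
import numpy as np

y_true = np.array([0, 1, 1, 0, np.nan])
y_pred = np.array([0.1, 0.9, 0.6, 0.3, 0.8])
print(auprc(y_true, y_pred))  # the NaN-labelled point is masked out before scoring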
def close(self):
"""Print error log and close session"""
if self.error_log and not self.quiet:
print("\nErrors occured:", file=sys.stderr)
for err in self.error_log:
print(err, file=sys.stderr)
self._session.close() | Print error log and close session |
def has_listener(self, evt_name, fn):
"""指定listener是否存在
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
"""
listeners = self.__get_listeners(evt_name)
return fn in listeners | Check whether the specified listener exists.
:params evt_name: event name
:params fn: the trigger function to check for |
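A self-contained sketch of the listener-registry pattern this method queries (a hypothetical minimal emitter, not the project's own class):
class Emitter:
    def __init__(self):
        self._listeners = {}  # evt_name -> list of callbacks

    def on(self, evt_name, fn):
        self._listeners.setdefault(evt_name, []).append(fn)

    def has_listener(self, evt_name, fn):
        return fn in self._listeners.get(evt_name, [])

em = Emitter()
handler = lambda payload: print(payload)
em.on("data", handler)
print(em.has_listener("data", handler))   # True
print(em.has_listener("close", handler))  # False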
def execute(opts, data, func, args, kwargs):
'''
Allow for the calling of execution modules via sudo.
This module is invoked by the minion if the ``sudo_user`` minion config is
present.
Example minion config:
.. code-block:: yaml
sudo_user: saltdev
Once this setting is made, any execution module call done by the minion will be
run under ``sudo -u <sudo_user> salt-call``. For example, with the above
minion config,
.. code-block:: bash
salt sudo_minion cmd.run 'cat /etc/sudoers'
is equivalent to
.. code-block:: bash
sudo -u saltdev salt-call cmd.run 'cat /etc/sudoers'
being run on ``sudo_minion``.
'''
cmd = ['sudo',
'-u', opts.get('sudo_user'),
'salt-call',
'--out', 'json',
'--metadata',
'-c', opts.get('config_dir'),
'--',
data.get('fun')]
if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):
kwargs['concurrent'] = True
for arg in args:
cmd.append(_cmd_quote(six.text_type(arg)))
for key in kwargs:
cmd.append(_cmd_quote('{0}={1}'.format(key, kwargs[key])))
cmd_ret = __salt__['cmd.run_all'](cmd, use_vt=True, python_shell=False)
if cmd_ret['retcode'] == 0:
cmd_meta = salt.utils.json.loads(cmd_ret['stdout'])['local']
ret = cmd_meta['return']
__context__['retcode'] = cmd_meta.get('retcode', 0)
else:
ret = cmd_ret['stderr']
__context__['retcode'] = cmd_ret['retcode']
return ret | Allow for the calling of execution modules via sudo.
This module is invoked by the minion if the ``sudo_user`` minion config is
present.
Example minion config:
.. code-block:: yaml
sudo_user: saltdev
Once this setting is made, any execution module call done by the minion will be
run under ``sudo -u <sudo_user> salt-call``. For example, with the above
minion config,
.. code-block:: bash
salt sudo_minion cmd.run 'cat /etc/sudoers'
is equivalent to
.. code-block:: bash
sudo -u saltdev salt-call cmd.run 'cat /etc/sudoers'
being run on ``sudo_minion``. |
def _default_hashfunc(content, hashbits):
"""
Default hash function is variable-length version of Python's builtin hash.
:param content: data that needs to be hashed.
:return: return a decimal number.
"""
if content == "":
return 0
x = ord(content[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in content:
x = ((x * m) ^ ord(c)) & mask
x ^= len(content)
if x == -1:
x = -2
return x | Default hash function is variable-length version of Python's builtin hash.
:param content: data that needs to be hashed.
:return: return a decimal number. |
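A minimal usage sketch (illustrative values only); the mask ``2 ** hashbits - 1`` keeps every digest inside the requested bit width:
for bits in (32, 64):
    digest = _default_hashfunc("simhash example", bits)
    print(bits, hex(digest), digest.bit_length() <= bits)  # the check prints True for both widths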
def resolve_outputs(self):
'''Resolve the names of outputs for this layer into shape tuples.'''
input_shape = None
for i, shape in enumerate(self._input_shapes.values()):
if i == 0:
input_shape = shape
if len(input_shape) != len(shape) or any(
a is not None and b is not None and a != b
for a, b in zip(input_shape[:-1], shape[:-1])):
raise util.ConfigurationError(
'layer "{}" incompatible input shapes {}'
.format(self.name, self._input_shapes))
size = self.kwargs.get('size')
shape = self.kwargs.get('shape')
if shape is not None:
pass
elif size is not None:
shape = tuple(input_shape[:-1]) + (size, )
else:
raise util.ConfigurationError(
'layer "{}" does not specify a size'.format(self.name))
self._output_shapes['out'] = shape | Resolve the names of outputs for this layer into shape tuples. |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
return _dict | Return a json dictionary representing this model. |
def get_script(self):
"""
Gets the configuration script of the logical enclosure by ID or URI.
Return:
str: Configuration script.
"""
uri = "{}/script".format(self.data["uri"])
return self._helper.do_get(uri) | Gets the configuration script of the logical enclosure by ID or URI.
Return:
str: Configuration script. |
def renew_voms_proxy(passwd="", vo=None, lifetime="196:00"):
"""
Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*, and
a default *lifetime* of 8 days. The password is written to a temporary file first and piped into
the renewal command to ensure it is not visible in the process list.
"""
with tmp_file() as (_, tmp):
with open(tmp, "w") as f:
f.write(passwd)
cmd = "cat '{}' | voms-proxy-init --valid '{}'".format(tmp, lifetime)
if vo:
cmd += " -voms '{}'".format(vo)
code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if code != 0:
raise Exception("proxy renewal failed: {}".format(out)) | Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*, and
a default *lifetime* of 8 days. The password is written to a temporary file first and piped into
the renewal command to ensure it is not visible in the process list. |
def __parse_organizations(self, stream):
"""Parse organizations stream"""
for aliases in self.__parse_stream(stream):
# Parse identity
identity = self.__parse_alias(aliases[1])
uuid = identity.email
uid = self._identities.get(uuid, None)
if not uid:
uid = UniqueIdentity(uuid=uuid)
identity.uuid = uuid
uid.identities.append(identity)
self._identities[uuid] = uid
# Parse organization
mailmap_id = aliases[0]
name = self.__encode(mailmap_id[0])
if name in MAILMAP_NO_ORGS:
continue
org = Organization(name=name)
self._organizations[name] = org
enrollment = Enrollment(start=MIN_PERIOD_DATE, end=MAX_PERIOD_DATE,
organization=org)
uid.enrollments.append(enrollment) | Parse organizations stream |
def get_nameid_data(self):
"""
Gets the NameID Data provided by the SAML Response from the IdP
:returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict
"""
nameid = None
nameid_data = {}
encrypted_id_data_nodes = self.__query_assertion('/saml:Subject/saml:EncryptedID/xenc:EncryptedData')
if encrypted_id_data_nodes:
encrypted_data = encrypted_id_data_nodes[0]
key = self.__settings.get_sp_key()
nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
else:
nameid_nodes = self.__query_assertion('/saml:Subject/saml:NameID')
if nameid_nodes:
nameid = nameid_nodes[0]
is_strict = self.__settings.is_strict()
want_nameid = self.__settings.get_security_data().get('wantNameId', True)
if nameid is None:
if is_strict and want_nameid:
raise OneLogin_Saml2_ValidationError(
'NameID not found in the assertion of the Response',
OneLogin_Saml2_ValidationError.NO_NAMEID
)
else:
if is_strict and want_nameid and not OneLogin_Saml2_Utils.element_text(nameid):
raise OneLogin_Saml2_ValidationError(
'An empty NameID value found',
OneLogin_Saml2_ValidationError.EMPTY_NAMEID
)
nameid_data = {'Value': OneLogin_Saml2_Utils.element_text(nameid)}
for attr in ['Format', 'SPNameQualifier', 'NameQualifier']:
value = nameid.get(attr, None)
if value:
if is_strict and attr == 'SPNameQualifier':
sp_data = self.__settings.get_sp_data()
sp_entity_id = sp_data.get('entityId', '')
if sp_entity_id != value:
raise OneLogin_Saml2_ValidationError(
'The SPNameQualifier value mismatches the SP entityID value.',
OneLogin_Saml2_ValidationError.SP_NAME_QUALIFIER_NAME_MISMATCH
)
nameid_data[attr] = value
return nameid_data | Gets the NameID Data provided by the SAML Response from the IdP
:returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict |
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
section.defects.append(errors.InvalidHeaderError("section number"
"has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value | '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS. |
def create_index(self, index, index_type=GEO2D):
"""Create an index on a given attribute
:param str index: Attribute to set index on
:param str index_type: See PyMongo index types for further information, defaults to GEO2D index.
"""
self.logger.info("Adding %s index to stores on attribute: %s" % (index_type, index))
yield self.collection.create_index([(index, index_type)]) | Create an index on a given attribute
:param str index: Attribute to set index on
:param str index_type: See PyMongo index types for further information, defaults to GEO2D index. |
def update_message(self, message_id, category_id, title, body,
extended_body, use_textile=False, private=False, notify=None):
"""
Updates an existing message, optionally sending notifications to a
selected list of people. Note that you can also upload files using
this function, but you have to format the request as
multipart/form-data. (See the ruby Basecamp API wrapper for an example
of how to do this.)
"""
path = '/msg/update/%u' % message_id
req = ET.Element('request')
req.append(self._create_message_post_elem(category_id, title, body,
extended_body, use_textile=use_textile, private=private))
if notify is not None:
for person_id in notify:
ET.SubElement(req, 'notify').text = str(int(person_id))
return self._request(path, req) | Updates an existing message, optionally sending notifications to a
selected list of people. Note that you can also upload files using
this function, but you have to format the request as
multipart/form-data. (See the ruby Basecamp API wrapper for an example
of how to do this.) |
def close(self, proto):
# pylint: disable=no-self-use
"""Closes a connection"""
try:
proto.sendClose()
except Exception as ex:
logger.exception("Failed to send close")
proto.reraise(ex) | Closes a connection |
def handler(*names, **kwargs):
"""Creates an Event Handler
This decorator can be applied to methods of classes derived from
:class:`circuits.core.components.BaseComponent`. It marks the method as a
handler for the events passed as arguments to the ``@handler`` decorator.
The events are specified by their name.
The decorated method's arguments must match the arguments passed to the
:class:`circuits.core.events.Event` on creation. Optionally, the
method may have an additional first argument named *event*. If declared,
the event object that caused the handler to be invoked is assigned to it.
By default, the handler is invoked by the component's root
:class:`~.manager.Manager` for events that are propagated on the channel
determined by the BaseComponent's *channel* attribute.
This may be overridden by specifying a different channel as a keyword
parameter of the decorator (``channel=...``).
Keyword argument ``priority`` influences the order in which handlers
for a specific event are invoked. The higher the priority, the earlier
the handler is executed.
If you want to override a handler defined in a base class of your
component, you must specify ``override=True``, else your method becomes
an additional handler for the event.
**Return value**
Normally, the results returned by the handlers for an event are simply
collected in the :class:`circuits.core.events.Event`'s :attr:`value`
attribute. As a special case, a handler may return a
:class:`types.GeneratorType`. This signals to the dispatcher that the
handler isn't ready to deliver a result yet.
Rather, it has interrupted its execution with a ``yield None``
statement, thus preserving its current execution state.
The dispatcher saves the returned generator object as a task.
All tasks are reexamined (i.e. their :meth:`next()` method is invoked)
when the pending events have been executed.
This feature avoids an unnecessarily complicated chaining of event
handlers. Imagine a handler A that needs the results from firing an
event E in order to complete. Then without this feature, the final
action of A would be to fire event E, and another handler for
an event ``SuccessE`` would be required to complete handler A's
operation, now having the result from invoking E available
(actually it's even a bit more complicated).
Using this "suspend" feature, the handler simply fires event E and
then yields ``None`` until e.g. it finds a result in E's :attr:`value`
attribute. For the simplest scenario, there even is a utility
method :meth:`circuits.core.manager.Manager.callEvent` that combines
firing and waiting.
"""
def wrapper(f):
if names and isinstance(names[0], bool) and not names[0]:
f.handler = False
return f
if len(names) > 0 and inspect.isclass(names[0]) and \
issubclass(names[0], hfosEvent):
f.names = (str(names[0].realname()),)
else:
f.names = names
f.handler = True
f.priority = kwargs.get("priority", 0)
f.channel = kwargs.get("channel", None)
f.override = kwargs.get("override", False)
args = inspect.getargspec(f)[0]
if args and args[0] == "self":
del args[0]
f.event = getattr(f, "event", bool(args and args[0] == "event"))
return f
return wrapper | Creates an Event Handler
This decorator can be applied to methods of classes derived from
:class:`circuits.core.components.BaseComponent`. It marks the method as a
handler for the events passed as arguments to the ``@handler`` decorator.
The events are specified by their name.
The decorated method's arguments must match the arguments passed to the
:class:`circuits.core.events.Event` on creation. Optionally, the
method may have an additional first argument named *event*. If declared,
the event object that caused the handler to be invoked is assigned to it.
By default, the handler is invoked by the component's root
:class:`~.manager.Manager` for events that are propagated on the channel
determined by the BaseComponent's *channel* attribute.
This may be overridden by specifying a different channel as a keyword
parameter of the decorator (``channel=...``).
Keyword argument ``priority`` influences the order in which handlers
for a specific event are invoked. The higher the priority, the earlier
the handler is executed.
If you want to override a handler defined in a base class of your
component, you must specify ``override=True``, else your method becomes
an additional handler for the event.
**Return value**
Normally, the results returned by the handlers for an event are simply
collected in the :class:`circuits.core.events.Event`'s :attr:`value`
attribute. As a special case, a handler may return a
:class:`types.GeneratorType`. This signals to the dispatcher that the
handler isn't ready to deliver a result yet.
Rather, it has interrupted its execution with a ``yield None``
statement, thus preserving its current execution state.
The dispatcher saves the returned generator object as a task.
All tasks are reexamined (i.e. their :meth:`next()` method is invoked)
when the pending events have been executed.
This feature avoids an unnecessarily complicated chaining of event
handlers. Imagine a handler A that needs the results from firing an
event E in order to complete. Then without this feature, the final
action of A would be to fire event E, and another handler for
an event ``SuccessE`` would be required to complete handler A's
operation, now having the result from invoking E available
(actually it's even a bit more complicated).
Using this "suspend" feature, the handler simply fires event E and
then yields ``None`` until e.g. it finds a result in E's :attr:`value`
attribute. For the simplest scenario, there even is a utility
method :meth:`circuits.core.manager.Manager.callEvent` that combines
firing and waiting. |
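A minimal sketch of the decorator applied to a hypothetical component method, exercising only the attributes set by ``wrapper`` above:
class Clock(object):
    @handler("second_elapsed", "minute_elapsed", priority=10)
    def tick(self, event, *args):
        return "tick"
print(Clock.tick.handler)   # True
print(Clock.tick.names)     # ('second_elapsed', 'minute_elapsed')
print(Clock.tick.priority)  # 10
print(Clock.tick.event)     # True, because the first argument after self is named 'event'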
def sample_discrete(self, state=None, n_steps=100, random_state=None):
r"""Generate a random sequence of states by propagating the model
using discrete time steps given by the model lagtime.
Parameters
----------
state : {None, ndarray, label}
Specify the starting state for the chain.
``None``
Choose the initial state by randomly drawing from the model's
stationary distribution.
``array-like``
If ``state`` is a 1D array with length equal to ``n_states_``,
then it is interpreted as an initial multinomial
distribution from which to draw the chain's initial state.
Note that the indexing semantics of this array must match the
_internal_ indexing of this model.
otherwise
Otherwise, ``state`` is interpreted as a particular
deterministic state label from which to begin the trajectory.
n_steps : int
Lengths of the resulting trajectory
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Returns
-------
sequence : array of length n_steps
A randomly sampled label sequence
"""
random = check_random_state(random_state)
r = random.rand(1 + n_steps)
if state is None:
initial = np.sum(np.cumsum(self.populations_) < r[0])
elif hasattr(state, '__len__') and len(state) == self.n_states_:
initial = np.sum(np.cumsum(state) < r[0])
else:
initial = self.mapping_[state]
cstr = np.cumsum(self.transmat_, axis=1)
chain = [initial]
for i in range(1, n_steps):
chain.append(np.sum(cstr[chain[i - 1], :] < r[i]))
return self.inverse_transform([chain])[0] | r"""Generate a random sequence of states by propagating the model
using discrete time steps given by the model lagtime.
Parameters
----------
state : {None, ndarray, label}
Specify the starting state for the chain.
``None``
Choose the initial state by randomly drawing from the model's
stationary distribution.
``array-like``
If ``state`` is a 1D array with length equal to ``n_states_``,
then it is interpreted as an initial multinomial
distribution from which to draw the chain's initial state.
Note that the indexing semantics of this array must match the
_internal_ indexing of this model.
otherwise
Otherwise, ``state`` is interpreted as a particular
deterministic state label from which to begin the trajectory.
n_steps : int
Lengths of the resulting trajectory
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Returns
-------
sequence : array of length n_steps
A randomly sampled label sequence |
def sort(self, column_or_label, descending=False, distinct=False):
"""Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
"""
column = self._get_column(column_or_label)
if distinct:
_, row_numbers = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::-1])
return self.take(row_numbers) | Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2 |
def init_state(self):
''' Sets the initial state of the state machine. '''
self.in_warc_response = False
self.in_http_response = False
self.in_payload = False | Sets the initial state of the state machine. |
def require_single_root_target(self):
"""If a single target was specified on the cmd line, returns that target.
Otherwise throws TaskError.
:API: public
"""
target_roots = self.context.target_roots
if len(target_roots) == 0:
raise TaskError('No target specified.')
elif len(target_roots) > 1:
raise TaskError('Multiple targets specified: {}'
.format(', '.join([repr(t) for t in target_roots])))
return target_roots[0] | If a single target was specified on the cmd line, returns that target.
Otherwise throws TaskError.
:API: public |
def setConfigurable(self, state):
"""
Sets whether or not this logger widget is configurable.
:param state | <bool>
"""
self._configurable = state
self._configButton.setVisible(state) | Sets whether or not this logger widget is configurable.
:param state | <bool> |
def invoked(self, ctx):
"""
Guacamole method used by the command ingredient.
:param ctx:
The guacamole context object. Context provides access to all
features of guacamole. The argparse ingredient adds the ``args``
attribute to it. That attribute contains the result of parsing
command line arguments.
:returns:
The return code of the command. Guacamole translates ``None`` to a
successful exit status (return code zero).
"""
print("{} + {} = {}".format(
ctx.args.x,
ctx.args.y,
ctx.args.x + ctx.args.y)) | Guacamole method used by the command ingredient.
:param ctx:
The guacamole context object. Context provides access to all
features of guacamole. The argparse ingredient adds the ``args``
attribute to it. That attribute contains the result of parsing
command line arguments.
:returns:
The return code of the command. Guacamole translates ``None`` to a
successful exit status (return code zero). |
def import_name(mod_name):
"""Import a module by module name.
@param mod_name: module name.
"""
try:
mod_obj_old = sys.modules[mod_name]
except KeyError:
mod_obj_old = None
if mod_obj_old is not None:
return mod_obj_old
__import__(mod_name)
mod_obj = sys.modules[mod_name]
return mod_obj | Import a module by module name.
@param mod_name: module name. |
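A short, hypothetical call; repeated calls return the module object cached in ``sys.modules``:
mod = import_name("json")
print(mod is import_name("json"))  # True: the second call hits the sys.modules cache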
def _on_hid_pnp(self, w_param, l_param):
"Process WM_DEVICECHANGE system messages"
new_status = "unknown"
if w_param == DBT_DEVICEARRIVAL:
# hid device attached
notify_obj = None
if int(l_param):
# Disable this error since pylint doesn't recognize
# that from_address actually exists
# pylint: disable=no-member
notify_obj = DevBroadcastDevInterface.from_address(l_param)
# confirm the right message was received
if notify_obj and \
notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE:
#only connect if already disconnected
new_status = "connected"
elif w_param == DBT_DEVICEREMOVECOMPLETE:
# hid device removed
notify_obj = None
if int(l_param):
# Disable this error since pylint doesn't recognize
# that from_address actually exists
# pylint: disable=no-member
notify_obj = DevBroadcastDevInterface.from_address(l_param)
if notify_obj and \
notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE:
# only disconnect if currently connected
new_status = "disconnected"
#verify if need to call event handler
if new_status != "unknown" and new_status != self.current_status:
self.current_status = new_status
self.on_hid_pnp(self.current_status)
#
return True | Process WM_DEVICECHANGE system messages |
def _is_image_sequenced(image):
"""Determine if the image is a sequenced image."""
try:
image.seek(1)
image.seek(0)
result = True
except EOFError:
result = False
return result | Determine if the image is a sequenced image. |
def get_valid_error(x1, x2=-1):
"""
Function that validates:
* x1 can be converted to a numpy array
* x2 can be converted to a numpy array (if provided)
* x1 and x2 have the same length (if both exist)
"""
# just error
if type(x2) == int and x2 == -1:
try:
e = np.array(x1)
except:
raise ValueError('Impossible to convert series to a numpy array')
# two series
else:
try:
x1 = np.array(x1)
x2 = np.array(x2)
except:
raise ValueError('Impossible to convert one of series to a numpy array')
if not len(x1) == len(x2):
raise ValueError('The length of both series must agree.')
e = x1 - x2
return e | Function that validates:
* x1 can be converted to a numpy array
* x2 can be converted to a numpy array (if provided)
* x1 and x2 have the same length (if both exist) |
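Two short, hypothetical calls covering both branches above:
print(get_valid_error([0.5, -0.2, 0.1]))                   # single series: returned as the error itself
print(get_valid_error([1.0, 2.0, 3.0], [1.1, 1.9, 3.2]))   # two series: element-wise x1 - x2, roughly [-0.1, 0.1, -0.2]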
def handle_setting_changed(sender, setting, value, enter, **kwargs): # pylint: disable=unused-argument
"""
Reinitialize handler implementation if a relevant setting changes
in e.g. application reconfiguration or during testing.
"""
if setting == 'AXES_HANDLER':
AxesProxyHandler.get_implementation(force=True) | Reinitialize handler implementation if a relevant setting changes
in e.g. application reconfiguration or during testing. |
def rgb2termhex(r: int, g: int, b: int) -> str:
""" Convert an rgb value to the nearest hex value that matches a term code.
The hex value will be one in `hex2term_map`.
"""
incs = [0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff]
res = []
parts = r, g, b
for part in parts:
if (part < 0) or (part > 255):
raise ValueError(
'Expecting 0-255 for RGB code, got: {!r}'.format(parts)
)
i = 0
while i < len(incs) - 1:
s, b = incs[i], incs[i + 1] # smaller, bigger
if s <= part <= b:
s1 = abs(s - part)
b1 = abs(b - part)
if s1 < b1:
closest = s
else:
closest = b
res.append(closest)
break
i += 1
# Convert back into nearest hex value.
return rgb2hex(*res) | Convert an rgb value to the nearest hex value that matches a term code.
The hex value will be one in `hex2term_map`. |
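A hypothetical call; each channel snaps to the nearest entry in the increment table ``[0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff]``:
# 100 -> 0x5f, 150 -> 0x87, 200 -> 0xd7, so the result encodes (0x5f, 0x87, 0xd7)
print(rgb2termhex(100, 150, 200))  # e.g. '5f87d7'; the exact string format is whatever rgb2hex produces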
def _g(self, z):
"""Helper function to solve Frank copula.
This functions encapsulates :math:`g_z = e^{-\\theta z} - 1` used on Frank copulas.
Argument:
z: np.ndarray
Returns:
np.ndarray
"""
return np.exp(np.multiply(-self.theta, z)) - 1 | Helper function to solve Frank copula.
This functions encapsulates :math:`g_z = e^{-\\theta z} - 1` used on Frank copulas.
Argument:
z: np.ndarray
Returns:
np.ndarray |
def certclone(chain, copy_extensions=False):
for i in range(len(chain)):
chain[i] = chain[i].to_cryptography()
newchain = []
'''
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
pubkey = key.public_key()
'''
first = True
for original in chain[::-1]:
#print(cert)
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
).decode()
if first:
print(key_pem)
first=False
pubkey = key.public_key()
# Todo: Code to mimic the private key type of original cert
# maybe based on pubkey.__class__
cert = x509.CertificateBuilder()
cert = cert.subject_name(original.subject)
cert = cert.issuer_name(original.issuer)
#cert = cert.serial_number(original.serial_number)
cert = cert.serial_number(x509.random_serial_number())
cert = cert.not_valid_before(original.not_valid_before)
cert = cert.not_valid_after(original.not_valid_after)
cert = cert.public_key(pubkey)
if copy_extensions:
for ext in original.extensions:
cert = cert.add_extension(ext.value, critical=ext.critical)
cert = cert.sign(private_key=key, algorithm=original.signature_hash_algorithm, backend=default_backend())
cert_pem = cert.public_bytes(serialization.Encoding.PEM).decode()
print(cert_pem)
newchain.insert(0, cert) | key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
pubkey = key.public_key() |
def __period_remaining(self):
'''
Return the period remaining for the current rate limit window.
:return: The remaining period.
:rtype: float
'''
elapsed = self.clock() - self.last_reset
return self.period - elapsed | Return the period remaining for the current rate limit window.
:return: The remaining period.
:rtype: float |
def parse_band_log(self, message):
"""Process incoming logging messages from the service."""
if "payload" in message and hasattr(message["payload"], "name"):
record = message["payload"]
for k in dir(record):
if k.startswith("workflows_exc_"):
setattr(record, k[14:], getattr(record, k))
delattr(record, k)
for k, v in self.get_status().items():
setattr(record, "workflows_" + k, v)
logging.getLogger(record.name).handle(record)
else:
self.log.warning(
"Received broken record on log band\n" + "Message: %s\nRecord: %s",
str(message),
str(
hasattr(message.get("payload"), "__dict__")
and message["payload"].__dict__
),
) | Process incoming logging messages from the service. |
def base_dict_to_string(base_dict):
"""
Converts a dictionary to a string. {'C': 12, 'A':4} gets converted to C:12;A:4
:param base_dict: Dictionary of bases and counts created by find_if_multibase
:return: String representing that dictionary.
"""
outstr = ''
# First, sort base_dict so that major allele always comes first - makes output report nicer to look at.
base_list = sorted(base_dict.items(), key=lambda kv: kv[1], reverse=True)
for base in base_list:
outstr += '{}:{};'.format(base[0], base[1])
return outstr[:-1] | Converts a dictionary to a string. {'C': 12, 'A':4} gets converted to C:12;A:4
:param base_dict: Dictionary of bases and counts created by find_if_multibase
:return: String representing that dictionary. |
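A one-line check of the docstring's own example (major allele first, trailing semicolon dropped):
print(base_dict_to_string({'C': 12, 'A': 4}))  # C:12;A:4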
def close(self, **kw):
"""
This asks Tor to close the underlying circuit object. See
:meth:`txtorcon.torstate.TorState.close_circuit`
for details.
You may pass keyword arguments to take care of any Flags Tor
accepts for the CLOSECIRCUIT command. Currently, this is only
"IfUnused". So for example: circ.close(IfUnused=True)
:return: Deferred which callbacks with this Circuit instance
ONLY after Tor has confirmed it is gone (not simply that the
CLOSECIRCUIT command has been queued). This could be a while
if you included IfUnused.
"""
# we're already closed; nothing to do
if self.state == 'CLOSED':
return defer.succeed(None)
# someone already called close() but we're not closed yet
if self._closing_deferred:
d = defer.Deferred()
def closed(arg):
d.callback(arg)
return arg
self._closing_deferred.addBoth(closed)
return d
# actually-close the circuit
self._closing_deferred = defer.Deferred()
def close_command_is_queued(*args):
return self._closing_deferred
d = self._torstate.close_circuit(self.id, **kw)
d.addCallback(close_command_is_queued)
return d | This asks Tor to close the underlying circuit object. See
:meth:`txtorcon.torstate.TorState.close_circuit`
for details.
You may pass keyword arguments to take care of any Flags Tor
accepts for the CLOSECIRCUIT command. Currently, this is only
"IfUnused". So for example: circ.close(IfUnused=True)
:return: Deferred which callbacks with this Circuit instance
ONLY after Tor has confirmed it is gone (not simply that the
CLOSECIRCUIT command has been queued). This could be a while
if you included IfUnused. |
def get_renderers(self):
"""
Instantiates and returns the list of renderers that this view can use.
"""
try:
source = self.get_object()
except (ImproperlyConfigured, APIException):
self.renderer_classes = [RENDERER_MAPPING[i] for i in self.__class__.renderers]
return [RENDERER_MAPPING[i]() for i in self.__class__.renderers]
else:
self.renderer_classes = [RENDERER_MAPPING[i] for i in source.__class__.renderers]
return [RENDERER_MAPPING[i]() for i in source.__class__.renderers] | Instantiates and returns the list of renderers that this view can use. |
def wash_urlargd(form, content):
"""Wash the complete form based on the specification in content.
Content is a dictionary containing the field names as a
key, and a tuple (type, default) as value.
'type' can be list, unicode, legacy.wsgi.utils.StringField, int,
tuple, or legacy.wsgi.utils.Field (for file uploads).
The specification automatically includes the 'ln' field, which is
common to all queries.
Arguments that are not defined in 'content' are discarded.
.. note::
In case `list` or `tuple` were asked for, we assume that
`list` or `tuple` of strings is to be returned. Therefore beware when
you want to use ``wash_urlargd()`` for multiple file upload forms.
:returns: argd dictionary that can be used for passing function
parameters by keywords.
"""
result = {}
for k, (dst_type, default) in content.items():
try:
value = form[k]
except KeyError:
result[k] = default
continue
src_type = type(value)
# First, handle the case where we want all the results. In
# this case, we need to ensure all the elements are strings,
# and not Field instances.
if src_type in (list, tuple):
if dst_type is list:
result[k] = [x for x in value]
continue
if dst_type is tuple:
result[k] = tuple([x for x in value])
continue
# in all the other cases, we are only interested in the
# first value.
value = value[0]
# Allow passing argument modyfing function.
if isinstance(dst_type, types.FunctionType):
result[k] = dst_type(value)
continue
# Maybe we already have what is expected? Then don't change
# anything.
if isinstance(value, dst_type):
result[k] = value
continue
# Since we got here, 'value' is sure to be a single symbol,
# not a list kind of structure anymore.
if dst_type in (int, float, long, bool):
try:
result[k] = dst_type(value)
except:
result[k] = default
elif dst_type is tuple:
result[k] = (value, )
elif dst_type is list:
result[k] = [value]
else:
raise ValueError(
'cannot cast form value %s of type %r into type %r' % (
value, src_type, dst_type))
return result | Wash the complete form based on the specification in content.
Content is a dictionary containing the field names as a
key, and a tuple (type, default) as value.
'type' can be list, unicode, legacy.wsgi.utils.StringField, int,
tuple, or legacy.wsgi.utils.Field (for file uploads).
The specification automatically includes the 'ln' field, which is
common to all queries.
Arguments that are not defined in 'content' are discarded.
.. note::
In case `list` or `tuple` were asked for, we assume that
`list` or `tuple` of strings is to be returned. Therefore beware when
you want to use ``wash_urlargd()`` for multiple file upload forms.
:returns: argd dictionary that can be used for passing function
parameters by keywords. |
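A minimal sketch of a specification dict and a washed form (field names are made up; this legacy code assumes Python 2, where ``long`` exists):
spec = {'q': (str, ''), 'page': (int, 1)}
form = {'q': ['neutrino'], 'page': ['3'], 'not_declared': ['x']}
print(wash_urlargd(form, spec))  # q -> 'neutrino', page -> 3; the undeclared key is discarded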
def _input_as_lines(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_lines(data))
return '' | Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file |
def start_at(self, document_fields):
"""Start query at a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.start_at(document_fields) | Start query at a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. |
def fix_bam_header(job, bamfile, sample_type, univ_options, samtools_options, retained_chroms=None):
"""
Fix the bam header to remove the command line call. Failing to do this causes Picard to reject
the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param list retained_chroms: A list of chromosomes to retain
:return: fsID for the output bam
:rtype: toil.fileStore.FileID
"""
if retained_chroms is None:
retained_chroms = []
work_dir = os.getcwd()
input_files = {
sample_type + '.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_input_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile,
tool_version=samtools_options['version'])
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, sample_type + '_output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
if retained_chroms and line.startswith('@SQ'):
if line.strip().split()[1].lstrip('SN:') not in retained_chroms:
continue
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_fixPG.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile,
tool_version=samtools_options['version'])
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
# The old bam file is now useless.
job.fileStore.deleteGlobalFile(bamfile)
job.fileStore.logToMaster('Ran reheader on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | Fix the bam header to remove the command line call. Failing to do this causes Picard to reject
the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param list retained_chroms: A list of chromosomes to retain
:return: fsID for the output bam
:rtype: toil.fileStore.FileID |
def generate_move(self, position):
"""
Returns valid and legal move given position
:type position: Board
:rtype: Move
"""
while True:
print(position)
raw = input(str(self.color) + "\'s move \n")
move = converter.short_alg(raw, self.color, position)
if move is None:
continue
return move | Returns valid and legal move given position
:type position: Board
:rtype: Move |
def get_assessments_offered(self):
"""Gets the assessment offered list resulting from the search.
return: (osid.assessment.AssessmentOfferedList) - the assessment
offered list
raise: IllegalState - the assessment offered list has already
been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.AssessmentOfferedList(self._results, runtime=self._runtime) | Gets the assessment offered list resulting from the search.
return: (osid.assessment.AssessmentOfferedList) - the assessment
offered list
raise: IllegalState - the assessment offered list has already
been retrieved
*compliance: mandatory -- This method must be implemented.* |
def set_sim_data(inj, field, data):
"""Sets data of a SimInspiral instance."""
try:
sim_field = sim_inspiral_map[field]
except KeyError:
sim_field = field
# for tc, map to geocentric times
if sim_field == 'tc':
inj.geocent_end_time = int(data)
inj.geocent_end_time_ns = int(1e9*(data % 1))
else:
setattr(inj, sim_field, data) | Sets data of a SimInspiral instance. |
def files(self, *args, **kwargs):
""" D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`.
"""
return [p for p in self.listdir(*args, **kwargs) if p.isfile()] | D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`. |
def select(self, ids, do_emit=True, **kwargs):
"""Select some rows in the table.
By default, the `select` event is raised, unless `do_emit=False`.
"""
# Select the rows without emiting the event.
self.eval_js('table.select({}, false);'.format(dumps(ids)))
if do_emit:
# Emit the event manually if needed.
self.emit('select', ids, **kwargs) | Select some rows in the table.
By default, the `select` event is raised, unless `do_emit=False`. |
def configure_model(self, attrs, field_name):
'''
Hook for ResourceMeta class to call when initializing model class.
Saves fields obtained from resource class backlinks
'''
self.relationship = field_name
self._set_method_names(relationship=field_name)
if self.res_name is None:
self.res_name = grammar.singularize(attrs.get('endpoint', 'unknown').strip('/')) | Hook for ResourceMeta class to call when initializing model class.
Saves fields obtained from resource class backlinks |
def Cp_material(ID, T=298.15):
r'''Returns heat capacity of a building, insulating, or refractory
material from tables in [1]_, [2]_, and [3]_. Heat capacity may or
may not be dependent on temperature depending on the source used. Function
must be provided with either a key to one of the dictionaries
`refractories`, `ASHRAE`, or `building_materials` - or a search term which
will pick the closest match based on a fuzzy search. To determine which
source the fuzzy search will pick, use the function `nearest_material`.
Fuzzy searches are slow; it is preferable to call this function with a
material key directly.
Parameters
----------
ID : str
String as described above
T : float, optional
Temperature of the material, [K]
Returns
-------
Cp : float
Heat capacity of the material, [J/kg/K]
Examples
--------
>>> Cp_material('Mineral fiber')
840.0
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
Hygrothermal Properties - Tabulated Design Values; English Version of
DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
'''
if ID not in materials_dict:
ID = nearest_material(ID)
if ID in refractories:
Cp = refractory_VDI_Cp(ID, T)
elif ID in building_materials:
Cp = float(building_materials[ID][2]) # Density available for all hits
else:
Cp = ASHRAE[ID][1]
if Cp is None:
raise Exception('Heat capacity is not available for this material')
else:
Cp = float(Cp)
return Cp | r'''Returns heat capacity of a building, insulating, or refractory
material from tables in [1]_, [2]_, and [3]_. Heat capacity may or
may not be dependent on temperature depending on the source used. Function
must be provided with either a key to one of the dictionaries
`refractories`, `ASHRAE`, or `building_materials` - or a search term which
will pick the closest match based on a fuzzy search. To determine which
source the fuzzy search will pick, use the function `nearest_material`.
Fuzzy searches are slow; it is preferable to call this function with a
material key directly.
Parameters
----------
ID : str
String as described above
T : float, optional
Temperature of the material, [K]
Returns
-------
Cp : float
Heat capacity of the material, [J/kg/K]
Examples
--------
>>> Cp_material('Mineral fiber')
840.0
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
Hygrothermal Properties - Tabulated Design Values; English Version of
DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010. |
def h(tagName, *children, **kwargs):
"""Takes an HTML Tag, children (string, array, or another element), and
attributes
Examples:
>>> h('div', [h('p', 'hey')])
<div><p>hey</p></div>
"""
attrs = {}
if 'attrs' in kwargs:
attrs = kwargs.pop('attrs')
attrs = attrs.copy()
attrs.update(kwargs)
el = createComponent(tagName)
return el(children, **attrs) | Takes an HTML Tag, children (string, array, or another element), and
attributes
Examples:
>>> h('div', [h('p', 'hey')])
<div><p>hey</p></div> |
async def start(self):
"""Start api initialization."""
_LOGGER.debug('Initializing pyEight Version: %s', __version__)
await self.fetch_token()
if self._token is not None:
await self.fetch_device_list()
await self.assign_users()
return True
else:
# We couldn't authenticate
return False | Start api initialization. |
def adjoint(self):
"""Adjoint of the sampling operator, a `WeightedSumSamplingOperator`.
If each sampling point occurs only once, the adjoint consists
in inserting the given values into the output at the sampling
points. Duplicate sampling points are weighted with their
multiplicity.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.SamplingOperator(space, sampling_points)
>>> x = space.element([[1, 2, 3],
... [4, 5, 6]])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
The ``'integrate'`` variant adjoint puts ones at the indices in
``sampling_points``, multiplied by their multiplicity:
>>> op = odl.SamplingOperator(space, sampling_points,
... variant='integrate')
>>> op.adjoint(op.range.one()) # (0, 0) occurs twice
uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element(
[[ 2., 0., 0.],
[ 0., 1., 1.]]
)
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
"""
if self.variant == 'point_eval':
variant = 'dirac'
elif self.variant == 'integrate':
variant = 'char_fun'
else:
raise RuntimeError('bad variant {!r}'.format(self.variant))
return WeightedSumSamplingOperator(self.domain, self.sampling_points,
variant) | Adjoint of the sampling operator, a `WeightedSumSamplingOperator`.
If each sampling point occurs only once, the adjoint consists
in inserting the given values into the output at the sampling
points. Duplicate sampling points are weighted with their
multiplicity.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.SamplingOperator(space, sampling_points)
>>> x = space.element([[1, 2, 3],
... [4, 5, 6]])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
The ``'integrate'`` variant adjoint puts ones at the indices in
``sampling_points``, multiplied by their multiplicity:
>>> op = odl.SamplingOperator(space, sampling_points,
... variant='integrate')
>>> op.adjoint(op.range.one()) # (0, 0) occurs twice
uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element(
[[ 2., 0., 0.],
[ 0., 1., 1.]]
)
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True |
def SetTimelineOwner(self, username):
"""Sets the username of the user that should own the timeline.
Args:
username (str): username.
"""
self._timeline_owner = username
logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner)) | Sets the username of the user that should own the timeline.
Args:
username (str): username. |
def tube_hires(script, height=1.0, radius=None, radius1=None, radius2=None,
diameter=None, diameter1=None, diameter2=None, cir_segments=32,
rad_segments=1, height_segments=1, center=False,
simple_bottom=False, color=None):
"""Create a cylinder with user defined number of segments
"""
# TODO: add option to round the top of the cylinder, i.e. deform spherically
# TODO: add warnings if values are ignored, e.g. if you specify both radius
# and diameter.
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1
if radius2 is None:
radius2 = 0
# Create top
annulus_hires(script,
radius1=radius1,
radius2=radius2,
cir_segments=cir_segments,
rad_segments=rad_segments)
transform.translate(script, [0, 0, height])
# Create bottom
if simple_bottom:
annulus(script,
radius1=radius1,
radius2=radius2,
cir_segments=cir_segments)
else:
layers.duplicate(script)
transform.translate(script, [0, 0, -height])
# Rotate to correct normals
transform.rotate(script, 'x', 180)
# Create outer tube
cylinder_open_hires(script, height, radius1,
cir_segments=cir_segments,
height_segments=height_segments)
# Create inner tube
if radius2 != 0:
cylinder_open_hires(script, height, radius2,
cir_segments=cir_segments,
height_segments=height_segments,
invert_normals=True)
# Join everything together
layers.join(script)
# Need some tolerance on merge_vert due to rounding errors
clean.merge_vert(script, threshold=0.00002)
if center:
transform.translate(script, [0, 0, -height / 2])
if color is not None:
vert_color.function(script, color=color)
return None | Create a cylinder with user defined number of segments |
def addcol(msname, colname=None, shape=None,
data_desc_type='array', valuetype=None, init_with=0, **kw):
""" add column to MS
msname : MS to add column to
colname : column name
shape : shape
valuetype : data type
data_desc_type : 'scalar' for scalar elements and 'array' for array elements
init_with : value to initialise the column with
"""
import numpy
import pyrap.tables
tab = pyrap.tables.table(msname,readonly=False)
try:
tab.getcol(colname)
print('Column already exists')
except RuntimeError:
print('Attempting to add %s column to %s'%(colname,msname))
from pyrap.tables import maketabdesc
valuetype = valuetype or 'complex'
if shape is None:
dshape = list(tab.getcol('DATA').shape)
shape = dshape[1:]
if data_desc_type=='array':
from pyrap.tables import makearrcoldesc
coldmi = tab.getdminfo('DATA') # God forbid this (or the TIME) column doesn't exist
coldmi['NAME'] = colname.lower()
tab.addcols(maketabdesc(makearrcoldesc(colname,init_with,shape=shape,valuetype=valuetype)),coldmi)
elif data_desc_type=='scalar':
from pyrap.tables import makescacoldesc
coldmi = tab.getdminfo('TIME')
coldmi['NAME'] = colname.lower()
tab.addcols(maketabdesc(makescacoldesc(colname,init_with,valuetype=valuetype)),coldmi)
print('Column added successfully.')
if init_with:
nrows = dshape[0]
rowchunk = nrows//10 if nrows > 1000 else nrows
for row0 in range(0,nrows,rowchunk):
nr = min(rowchunk,nrows-row0)
dshape[0] = nr
tab.putcol(colname,numpy.ones(dshape,dtype=valuetype)*init_with,row0,nr)
tab.close() | add column to MS
msname : MS to add column to
colname : column name
shape : shape
valuetype : data type
data_desc_type : 'scalar' for scalar elements and 'array' for array elements
init_with : value to initialise the column with |
def determine_deaths(self, event: Event):
"""Determines who dies each time step.
Parameters
----------
event :
An event object emitted by the simulation containing an index
representing the simulants affected by the event and timing
information.
"""
effective_rate = self.mortality_rate(event.index)
effective_probability = 1 - np.exp(-effective_rate)
draw = self.randomness.get_draw(event.index)
affected_simulants = draw < effective_probability
self.population_view.update(pd.Series('dead', index=event.index[affected_simulants])) | Determines who dies each time step.
Parameters
----------
event :
An event object emitted by the simulation containing an index
representing the simulants affected by the event and timing
information. |
def load(self, ):
"""If the reference is in the scene but unloaded, load it.
.. Note:: Do not confuse this with reference or import. Load means that it is already referenced.
But the data from the reference was not read until now. Load loads the data from the reference.
This will call :meth:`RefobjInterface.load` and set the status to :data:`Reftrack.LOADED`.
:returns: None
:rtype: None
:raises: :class:`ReftrackIntegrityError`
"""
assert self.status() == self.UNLOADED,\
"Cannot load if there is no unloaded reference. Use reference instead."
self.get_refobjinter().load(self._refobj)
self.set_status(self.LOADED)
self.fetch_new_children()
self.update_restrictions()
self.emit_data_changed() | If the reference is in the scene but unloaded, load it.
.. Note:: Do not confuse this with reference or import. Load means that it is already referenced.
But the data from the reference was not read until now. Load loads the data from the reference.
This will call :meth:`RefobjInterface.load` and set the status to :data:`Reftrack.LOADED`.
:returns: None
:rtype: None
:raises: :class:`ReftrackIntegrityError` |
def serialize(self, private=False):
"""
Go from a
cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
or EllipticCurvePublicKey instance to a JWK representation.
:param private: Whether we should include the private attributes or not.
:return: A JWK as a dictionary
"""
if self.priv_key:
self._serialize(self.priv_key)
else:
self._serialize(self.pub_key)
res = self.common()
res.update({
"crv": self.crv,
"x": self.x,
"y": self.y
})
if private and self.d:
res["d"] = self.d
return res | Go from a
cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
or EllipticCurvePublicKey instance to a JWK representation.
:param private: Whether we should include the private attributes or not.
:return: A JWK as a dictionary |
def _get_mime_type(self, buff):
"""Get the MIME type for a given stream of bytes
:param buff: Stream of bytes
:type buff: bytes
:rtype: str
"""
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None | Get the MIME type for a given stream of bytes
:param buff: Stream of bytes
:type buff: bytes
:rtype: str |
def gaussian_distribution(mean, stdev, num_pts=50):
""" get an x and y numpy.ndarray that spans the +/- 4
standard deviation range of a gaussian distribution with
a given mean and standard deviation. useful for plotting
Parameters
----------
mean : float
the mean of the distribution
stdev : float
the standard deviation of the distribution
num_pts : int
the number of points in the returned ndarrays.
Default is 50
Returns
-------
x : numpy.ndarray
the x-values of the distribution
y : numpy.ndarray
the y-values of the distribution
"""
warnings.warn("pyemu.helpers.gaussian_distribution() has moved to plot_utils",PyemuWarning)
from pyemu import plot_utils
return plot_utils.gaussian_distribution(mean=mean,stdev=stdev,num_pts=num_pts) | get an x and y numpy.ndarray that spans the +/- 4
standard deviation range of a gaussian distribution with
a given mean and standard deviation. useful for plotting
Parameters
----------
mean : float
the mean of the distribution
stdev : float
the standard deviation of the distribution
num_pts : int
the number of points in the returned ndarrays.
Default is 50
Returns
-------
x : numpy.ndarray
the x-values of the distribution
y : numpy.ndarray
the y-values of the distribution |