def markdown(iterable, renderer=HTMLRenderer):
"""
Output HTML with default settings.
Enables inline and block-level HTML tags.
"""
with renderer() as renderer:
        return renderer.render(Document(iterable))
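# Hedged usage sketch for markdown() above; it assumes the surrounding module
# also provides Document and HTMLRenderer (mistletoe-style API), so those names
# come from that context rather than from this snippet.
#
#     html = markdown(["# Title\n", "Some *emphasised* text.\n"])
#     # html is a single string containing the rendered HTML document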
def push_account_task(obj_id):
"""
Async: push_account_task.delay(Account.id)
"""
lock_id = "%s-push-account-%s" % (settings.ENV_PREFIX, obj_id)
acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE) # noqa: E731
release_lock = lambda: cache.delete(lock_id) # noqa: E731
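    # Note: cache.add() only stores the key when it is not already present, so it
    # serves as a simple "acquire" primitive (atomic on memcached/redis backends);
    # the lock auto-expires after LOCK_EXPIRE seconds as a safety net.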
if acquire_lock():
UserModel = get_user_model()
try:
upload_intercom_user(obj_id)
        except UserModel.DoesNotExist:
            # seems like the account was removed before it was pushed
            pass
        release_lock()
def get_first():
"""
return first droplet
"""
client = po.connect() # this depends on the DIGITALOCEAN_API_KEY envvar
all_droplets = client.droplets.list()
id = all_droplets[0]['id'] # I'm cheating because I only have one droplet
    return client.droplets.get(id)
def _get_hosted_zone_limit(self, limit_type, hosted_zone_id):
"""
Return a hosted zone limit [recordsets|vpc_associations]
:rtype: dict
"""
result = self.conn.get_hosted_zone_limit(
Type=limit_type,
HostedZoneId=hosted_zone_id
)
        return result
def fetch_token(self, client_secret, code, context, scope, redirect_uri,
token_url='https://login.bigcommerce.com/oauth2/token'):
"""
Fetches a token from given token_url, using given parameters, and sets up session headers for
future requests.
redirect_uri should be the same as your callback URL.
code, context, and scope should be passed as parameters to your callback URL on app installation.
Raises HttpException on failure (same as Connection methods).
"""
res = self.post(token_url, {'client_id': self.client_id,
'client_secret': client_secret,
'code': code,
'context': context,
'scope': scope,
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri},
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self._session.headers.update(self._oauth_headers(self.client_id, res['access_token']))
        return res
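# Hedged sketch of how a callback handler might call fetch_token(); the request
# object, connection instance, secret and redirect URI are hypothetical names,
# not part of the snippet above.
#
#     def auth_callback(request, conn, client_secret, redirect_uri):
#         token = conn.fetch_token(
#             client_secret=client_secret,
#             code=request.args["code"],
#             context=request.args["context"],
#             scope=request.args["scope"],
#             redirect_uri=redirect_uri,
#         )
#         return token["access_token"]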
def search(self, fields=None, query=None, filters=None):
"""Search for entities.
At its simplest, this method searches for all entities of a given kind.
For example, to ask for all
:class:`nailgun.entities.LifecycleEnvironment` entities::
LifecycleEnvironment().search()
Values on an entity are used to generate a search query, and the
``fields`` argument can be used to specify which fields should be used
when generating a search query::
lc_env = LifecycleEnvironment(name='foo', organization=1)
results = lc_env.search() # Search by name and organization.
results = lc_env.search({'name', 'organization'}) # Same.
results = lc_env.search({'name'}) # Search by name.
results = lc_env.search({'organization'}) # Search by organization
results = lc_env.search(set()) # Search for all lifecycle envs.
results = lc_env.search({'library'}) # Error!
In some cases, the simple search queries that can be generated by
NailGun are not sufficient. In this case, you can pass in a raw search
query instead. For example, to search for all lifecycle environments
with a name of 'foo'::
LifecycleEnvironment().search(query={'search': 'name="foo"'})
The example above is rather pointless: it is easier and more concise to
use a generated query. But — and this is a **very** important "but" —
the manual search query is melded in to the generated query. This can
be used to great effect::
LifecycleEnvironment(name='foo').search(query={'per_page': 50})
For examples of what the final search queries look like, see
:meth:`search_payload`. (That method also accepts the ``fields`` and
``query`` arguments.)
In some cases, the server's search facilities may be insufficient, or
it may be inordinately difficult to craft a search query. In this case,
you can filter search results locally. For example, to ask the server
for a list of all lifecycle environments and then locally search
through the results for the lifecycle environment named "foo"::
LifecycleEnvironment().search(filters={'name': 'foo'})
Be warned that filtering locally can be **very** slow. NailGun must
``read()`` every single entity returned by the server before filtering
results. This is because the values used in the filtering process may
not have been returned by the server in the initial response to the
search.
The fact that all entities are read when ``filters`` is specified can
be used to great effect. For example, this search returns a fully
populated list of every single lifecycle environment::
LifecycleEnvironment().search(filters={})
:param fields: A set naming which fields should be used when generating
a search query. If ``None``, all values on the entity are used. If
an empty set, no values are used.
:param query: A dict containing a raw search query. This is melded in
to the generated search query like so: ``{generated:
query}.update({manual: query})``.
:param filters: A dict. Used to filter search results locally.
:return: A list of entities, all of type ``type(self)``.
"""
# Goals:
#
# * Be tolerant of missing values. It's reasonable for the server to
# return an incomplete set of attributes for each search result.
# * Use as many returned values as possible. There's no point in
# letting returned data go to waste. This implies that we must…
# * …parse irregular server responses. This includes pluralized field
# names, misnamed attributes (e.g. BZ 1233245) and weirdly named
# fields (e.g. Media.path_).
#
results = self.search_json(fields, query)['results']
results = self.search_normalize(results)
entities = [
type(self)(self._server_config, **result)
for result in results
]
if filters is not None:
entities = self.search_filter(entities, filters)
        return entities
def get_columns(self, font):
""" Return the number of columns for the given font.
"""
font = self.get_font(font)
        return self.fonts[six.text_type(font)]['columns']
def fit(self, X, y, step_size=0.1, init_weights=None, warm_start: bool=False):
"""Fit the weights on the given predictions.
Args:
X (array-like): Predictions of different models for the labels.
y (array-like): Labels.
step_size (float): Step size for optimizing the weights.
Smaller step sizes most likely improve resulting score but increases training time.
init_weights (array-like): Initial weights for training.
When `warm_start` is used `init_weights` are ignored.
warm_start (bool): Continues training. Will only work when `fit` has been called with this object earlier.
When `warm_start` is used `init_weights` are ignored.
Returns: self
"""
assert len(np.shape(X)) == 2, 'X must be 2-dimensional, got {}-D instead.'.format(len(np.shape(X)))
assert np.shape(X)[0] > 1, 'X must contain predictions from at least two models. ' \
'Got {} instead'.format(np.shape(X)[0])
assert np.shape(X)[1] == len(y), (
'BlendingOptimizer: Length of predictions and labels does not match: '
'preds_len={}, y_len={}'.format(np.shape(X)[1], len(y)))
if warm_start:
assert self._weights is not None, 'Optimizer has to be fitted before `warm_start` can be used.'
weights = self._weights
elif init_weights is None:
weights = np.array([1.0] * len(X))
else:
assert (len(init_weights) == np.shape(X)[0]), (
'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '
'n_models={}, weights_len={}'.format(np.shape(X)[0], len(init_weights)))
weights = init_weights
def __is_better_score(score_to_test, score):
return score_to_test > score if self.maximize else not score_to_test > score
score = 0
best_score = self.maximize - 0.5
while __is_better_score(best_score, score):
best_score = self.metric(y, np.average(np.power(X, self._power), weights=weights, axis=0) ** (
1.0 / self._power))
score = best_score
best_index, best_step = -1, 0.0
for j in range(len(X)):
delta = np.array([(0 if k != j else step_size) for k in range(len(X))])
s = self.metric(y, np.average(np.power(X, self._power), weights=weights + delta, axis=0) ** (
1.0 / self._power))
if __is_better_score(s, best_score):
best_index, best_score, best_step = j, s, step_size
continue
if weights[j] - step_size >= 0:
s = self.metric(y, np.average(np.power(X, self._power), weights=weights - delta, axis=0) ** (
1.0 / self._power))
if s > best_score:
best_index, best_score, best_step = j, s, -step_size
if __is_better_score(best_score, score):
weights[best_index] += best_step
self._weights = weights
self._score = best_score
        return self
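# Hedged usage sketch for fit(); BlendingOptimizer, its constructor arguments
# and the accuracy metric below are assumptions about the surrounding class and
# are not shown in the snippet above.
#
#     import numpy as np
#
#     def accuracy(y_true, y_pred):
#         return float(np.mean((np.asarray(y_pred) > 0.5) == np.asarray(y_true)))
#
#     preds_model_a = np.array([0.2, 0.8, 0.6, 0.4])
#     preds_model_b = np.array([0.3, 0.7, 0.5, 0.5])
#     y = np.array([0, 1, 1, 0])
#
#     X = np.stack([preds_model_a, preds_model_b])   # shape (n_models, n_samples)
#     opt = BlendingOptimizer(metric=accuracy, maximize=True)
#     opt.fit(X, y, step_size=0.05)                  # learned weights end up in opt._weights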
def watch_files(self):
"""watch files for changes, if changed, rebuild blog. this thread
will quit if the main process ends"""
try:
while 1:
sleep(1) # check every 1s
try:
files_stat = self.get_files_stat()
except SystemExit:
logger.error("Error occurred, server shut down")
self.shutdown_server()
if self.files_stat != files_stat:
logger.info("Changes detected, start rebuilding..")
try:
generator.re_generate()
global _root
_root = generator.root
except SystemExit: # catch sys.exit, it means fatal error
logger.error("Error occurred, server shut down")
self.shutdown_server()
self.files_stat = files_stat # update files' stat
except KeyboardInterrupt:
            # For some reason this exception is never caught here: every
            # KeyboardInterrupt is caught by the server thread, which
            # terminates this thread at the same time.
logger.info("^C received, shutting down watcher")
            self.shutdown_watcher()
def vertex_normals(self):
"""
The vertex normals of the mesh. If the normals were loaded
we check to make sure we have the same number of vertex
normals and vertices before returning them. If there are
no vertex normals defined or a shape mismatch we calculate
the vertex normals from the mean normals of the faces the
vertex is used in.
Returns
----------
vertex_normals : (n,3) float
Represents the surface normal at each vertex.
Where n == len(self.vertices)
"""
# make sure we have faces_sparse
assert hasattr(self.faces_sparse, 'dot')
vertex_normals = geometry.mean_vertex_normals(
vertex_count=len(self.vertices),
faces=self.faces,
face_normals=self.face_normals,
sparse=self.faces_sparse)
        return vertex_normals
def str_rstrip(x, to_strip=None):
"""Remove trailing characters from a string sample.
:param str to_strip: The string to be removed
:returns: an expression containing the modified string column.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.rstrip(to_strip='ing')
Expression = str_rstrip(text, to_strip='ing')
Length: 5 dtype: str (expression)
---------------------------------
0 Someth
1 very pretty
2 is com
3 our
4 way.
"""
# in c++ we give empty string the same meaning as None
sl = _to_string_sequence(x).rstrip('' if to_strip is None else to_strip) if to_strip != '' else x
    return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def create_cookie(self, delete=None):
"""
Creates the value for ``Set-Cookie`` HTTP header.
:param bool delete:
If ``True`` the cookie value will be ``deleted`` and the
Expires value will be ``Thu, 01-Jan-1970 00:00:01 GMT``.
"""
value = 'deleted' if delete else self._serialize(self.data)
split_url = parse.urlsplit(self.adapter.url)
domain = split_url.netloc.split(':')[0]
# Work-around for issue #11, failure of WebKit-based browsers to accept
# cookies set as part of a redirect response in some circumstances.
if '.' not in domain:
template = '{name}={value}; Path={path}; HttpOnly{secure}{expires}'
else:
template = ('{name}={value}; Domain={domain}; Path={path}; '
'HttpOnly{secure}{expires}')
return template.format(
name=self.name,
value=value,
domain=domain,
path=split_url.path,
secure='; Secure' if self.secure else '',
expires='; Expires=Thu, 01-Jan-1970 00:00:01 GMT' if delete else ''
        )
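# Illustrative output of create_cookie() (values hypothetical): for a cookie
# named 'auth' on https://example.com/app with secure=True and delete=False,
# the returned header value looks like:
#
#     "auth=<serialized-data>; Domain=example.com; Path=/app; HttpOnly; Secure"
#
# With delete=True the value becomes 'deleted' and the 1970 Expires stamp is
# appended; for dot-less domains the Domain attribute is omitted entirely.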
def get_shark_field(self, fields):
"""
:fields: str[]
"""
out = super(BACK, self).get_shark_field(fields)
out.update({'acked_seqs': self.acked_seqs,
'bitmap_str': self.bitmap_str})
        return out
def get_config(self):
"""
serialize to a dict all attributes except model weights
Returns
-------
dict
"""
self.update_network_description()
result = dict(self.__dict__)
result['_network'] = None
result['network_weights'] = None
result['network_weights_loader'] = None
result['prediction_cache'] = None
        return result
def _get_channel(self):
"""Returns a channel according to if there is a redirection to do or
not.
"""
channel = self._transport.open_session()
channel.set_combine_stderr(True)
channel.get_pty()
        return channel
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
                                       'shape_id', 'service_id'])
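# The trip IDs built above join their components with cs.SEP; with hypothetical
# values and SEP='-', a generated ID would look like 't-route1-peak-07:00:00-0-3',
# i.e. prefix, route ID, service window, start time, direction and trip index.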
def _parse_launch_error(data):
"""
Parses a LAUNCH_ERROR message and returns a LaunchFailure object.
:type data: dict
:rtype: LaunchFailure
"""
return LaunchFailure(
data.get(ERROR_REASON, None),
data.get(APP_ID),
data.get(REQUEST_ID),
    )
def add_sparse_covariance_matrix(self,x,y,names,iidx,jidx,data):
"""build a pyemu.SparseMatrix instance implied by Vario2d
Parameters
----------
x : (iterable of floats)
x-coordinate locations
y : (iterable of floats)
y-coordinate locations
names : (iterable of str)
names of locations. If None, cov must not be None
iidx : 1-D ndarray
i row indices
jidx : 1-D ndarray
j col indices
data : 1-D ndarray
nonzero entries
Returns
-------
None
"""
if not isinstance(x, np.ndarray):
x = np.array(x)
if not isinstance(y, np.ndarray):
y = np.array(y)
assert x.shape[0] == y.shape[0]
assert x.shape[0] == len(names)
# c = np.zeros((len(names), len(names)))
# np.fill_diagonal(c, self.contribution)
# cov = Cov(x=c, names=names)
# elif cov is not None:
# assert cov.shape[0] == x.shape[0]
# names = cov.row_names
# c = np.zeros((len(names), 1)) + self.contribution
# cont = Cov(x=c, names=names, isdiagonal=True)
# cov += cont
#
# else:
# raise Exception("Vario2d.covariance_matrix() requires either" +
# "names or cov arg")
# rc = self.rotation_coefs
for i,name in enumerate(names):
iidx.append(i)
jidx.append(i)
data.append(self.contribution)
for i1, (n1, x1, y1) in enumerate(zip(names, x, y)):
dx = x1 - x[i1 + 1:]
dy = y1 - y[i1 + 1:]
dxx, dyy = self._apply_rotation(dx, dy)
h = np.sqrt(dxx * dxx + dyy * dyy)
h[h < 0.0] = 0.0
cv = self._h_function(h)
if np.any(np.isnan(cv)):
raise Exception("nans in cv for i1 {0}".format(i1))
#cv[h>self.a] = 0.0
j = list(np.arange(i1+1,x.shape[0]))
i = [i1] * len(j)
iidx.extend(i)
jidx.extend(j)
data.extend(list(cv))
# replicate across the diagonal
iidx.extend(j)
jidx.extend(i)
            data.extend(list(cv))
def _readBlock(self):
"""Read a block of data from the remote reader."""
if self.interrupted or self.fp is None:
if self.debug:
log.msg('WorkerFileDownloadCommand._readBlock(): end')
return True
length = self.blocksize
if self.bytes_remaining is not None and length > self.bytes_remaining:
length = self.bytes_remaining
if length <= 0:
if self.stderr is None:
self.stderr = "Maximum filesize reached, truncating file '{0}'".format(
self.path)
self.rc = 1
return True
else:
d = self.reader.callRemote('read', length)
d.addCallback(self._writeData)
            return d
def datasetsBM(host=biomart_host):
"""
Lists BioMart datasets.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
    server = BiomartServer(host)
server.show_datasets()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
    print(v)
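# Stand-alone sketch of the stdout-capture trick used above (no BioMart needed):
#
#     import sys
#     from io import StringIO
#
#     buf, old = StringIO(), sys.stdout
#     sys.stdout = buf
#     print("output from a chatty library")
#     sys.stdout = old
#     text = buf.getvalue()   # "output from a chatty library\n"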
def generate_unit_squares(image_width, image_height):
"""Generate coordinates for a tiling of unit squares."""
# Iterate over the required rows and cells. The for loops (x, y)
# give the coordinates of the top left-hand corner of each square:
#
# (x, y) +-----+ (x + 1, y)
# | |
# | |
# | |
# (x, y + 1) +-----+ (x + 1, y + 1)
#
for x in range(image_width):
for y in range(image_height):
            yield [(x, y), (x + 1, y), (x + 1, y + 1), (x, y + 1)]
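# Worked example: the generator above yields one square per cell, column by column.
#
#     list(generate_unit_squares(2, 1))
#     # -> [[(0, 0), (1, 0), (1, 1), (0, 1)],
#     #     [(1, 0), (2, 0), (2, 1), (1, 1)]]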
def _get_baremetal_connections(self, port,
only_active_switch=False,
from_segment=False):
"""Get switch ips and interfaces from baremetal transaction.
This method is used to extract switch/interface
information from transactions where VNIC_TYPE is
baremetal.
:param port: Received port transaction
:param only_active_switch: Indicator for selecting
connections with switches that are active
:param from_segment: only return interfaces from the
segment/transaction as opposed to
say port channels which are learned.
:Returns: list of switch_ip, intf_type, port_id, is_native
"""
connections = []
is_native = False if self.trunk.is_trunk_subport(port) else True
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
# Extract port info
intf_type, port = nexus_help.split_interface_name(
link_info['port_id'])
# Determine if this switch is to be skipped
switch_info = self._get_baremetal_switch_info(
link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
# If not for Nexus
if not self._switch_defined(switch_ip):
continue
# Requested connections for only active switches
if (only_active_switch and
not self.is_switch_active(switch_ip)):
continue
ch_grp = 0
if not from_segment:
try:
reserved = nxos_db.get_switch_if_host_mappings(
switch_ip,
nexus_help.format_interface_name(
intf_type, port))
if reserved[0].ch_grp > 0:
ch_grp = reserved[0].ch_grp
intf_type, port = nexus_help.split_interface_name(
'', ch_grp)
except excep.NexusHostMappingNotFound:
pass
connections.append((switch_ip, intf_type, port,
is_native, ch_grp))
        return connections
def vmomentsurfacemass(self,R,n,m,t=0.,nsigma=None,deg=False,
epsrel=1.e-02,epsabs=1.e-05,phi=0.,
grid=None,gridpoints=101,returnGrid=False,
hierarchgrid=False,nlevels=2,
print_progress=False,
integrate_method='dopr54_c',
deriv=None):
"""
NAME:
vmomentsurfacemass
PURPOSE:
           calculate an arbitrary moment of the velocity distribution at (R,phi) times the surface mass
INPUT:
R - radius at which to calculate the moment (in natural units)
phi= azimuth (rad unless deg=True)
n - vR^n
m - vT^m
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous, but not too generous)
deg= azimuth is in degree (default=False)
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid; if this was created for a list of times, moments are calculated for each time
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
print_progress= if True, print progress updates
integrate_method= orbit.integrate method argument
           deriv= None, 'R', or 'phi': calculates derivative of the moment wrt R or phi **only with grid options**
OUTPUT:
<vR^n vT^m x surface-mass> at R,phi (no support for units)
COMMENT:
grid-based calculation is the only one that is heavily tested (although the test suite also tests the direct calculation)
HISTORY:
2011-03-30 - Written - Bovy (NYU)
"""
#if we have already precalculated a grid, use that
if not grid is None and isinstance(grid,evolveddiskdfGrid):
if returnGrid:
return (self._vmomentsurfacemassGrid(n,m,grid),grid)
else:
return self._vmomentsurfacemassGrid(n,m,grid)
elif not grid is None \
and isinstance(grid,evolveddiskdfHierarchicalGrid):
if returnGrid:
return (self._vmomentsurfacemassHierarchicalGrid(n,m,grid),
grid)
else:
return self._vmomentsurfacemassHierarchicalGrid(n,m,grid)
#Otherwise we need to do some more work
if deg: az= phi*_DEGTORAD
else: az= phi
if nsigma is None: nsigma= _NSIGMA
if _PROFILE: #pragma: no cover
start= time_module.time()
if hasattr(self._initdf,'_estimatemeanvR') \
and hasattr(self._initdf,'_estimatemeanvT') \
and hasattr(self._initdf,'_estimateSigmaR2') \
and hasattr(self._initdf,'_estimateSigmaT2'):
sigmaR1= nu.sqrt(self._initdf._estimateSigmaR2(R,phi=az))
sigmaT1= nu.sqrt(self._initdf._estimateSigmaT2(R,phi=az))
meanvR= self._initdf._estimatemeanvR(R,phi=az)
meanvT= self._initdf._estimatemeanvT(R,phi=az)
else:
warnings.warn("No '_estimateSigmaR2' etc. functions found for initdf in evolveddf; thus using potentially slow sigmaR2 etc functions",
galpyWarning)
sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=az,use_physical=False))
sigmaT1= nu.sqrt(self._initdf.sigmaT2(R,phi=az,use_physical=False))
meanvR= self._initdf.meanvR(R,phi=az,use_physical=False)
meanvT= self._initdf.meanvT(R,phi=az,use_physical=False)
if _PROFILE: #pragma: no cover
setup_time= (time_module.time()-start)
if not grid is None and isinstance(grid,bool) and grid:
if not hierarchgrid:
if _PROFILE: #pragma: no cover
start= time_module.time()
grido= self._buildvgrid(R,az,nsigma,t,
sigmaR1,sigmaT1,meanvR,meanvT,
gridpoints,print_progress,
integrate_method,deriv)
if _PROFILE: #pragma: no cover
grid_time= (time_module.time()-start)
print(setup_time/(setup_time+grid_time), \
grid_time/(setup_time+grid_time), \
setup_time+grid_time)
if returnGrid:
return (self._vmomentsurfacemassGrid(n,m,grido),grido)
else:
return self._vmomentsurfacemassGrid(n,m,grido)
else: #hierarchical grid
grido= evolveddiskdfHierarchicalGrid(self,R,az,nsigma,t,
sigmaR1,sigmaT1,meanvR,
meanvT,
gridpoints,nlevels,deriv,
print_progress=print_progress)
if returnGrid:
return (self._vmomentsurfacemassHierarchicalGrid(n,m,
grido),
grido)
else:
return self._vmomentsurfacemassHierarchicalGrid(n,m,grido)
#Calculate the initdf moment and then calculate the ratio
initvmoment= self._initdf.vmomentsurfacemass(R,n,m,nsigma=nsigma,
phi=phi)
if initvmoment == 0.: initvmoment= 1.
norm= sigmaR1**(n+1)*sigmaT1**(m+1)*initvmoment
if isinstance(t,(list,nu.ndarray)):
raise IOError("list of times is only supported with grid-based calculation")
return dblquad(_vmomentsurfaceIntegrand,
meanvT/sigmaT1-nsigma,
meanvT/sigmaT1+nsigma,
lambda x: meanvR/sigmaR1
-nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.),
lambda x: meanvR/sigmaR1
+nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.),
(R,az,self,n,m,sigmaR1,sigmaT1,t,initvmoment),
                       epsrel=epsrel,epsabs=epsabs)[0]*norm
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
MetricCommon.get_arguments(self)
if self.args.metricName is not None:
self.metricName = self.args.metricName
if self.args.displayName is not None:
self.displayName = self.args.displayName
if self.args.displayNameShort is not None:
self.displayNameShort = self.args.displayNameShort
if self.args.description is not None:
self.description = self.args.description
if self.args.aggregate is not None:
self.aggregate = self.args.aggregate
if self.args.unit is not None:
self.unit = self.args.unit
if self.args.resolution is not None:
self.resolution = self.args.resolution
if self.args.isDisabled is not None:
self.isDisabled = self.args.isDisabled
if self.args.type is not None:
self.type = self.args.type
data = {}
if self.metricName is not None:
data['name'] = self.metricName
if self.displayName is not None:
data['displayName'] = self.displayName
if self.displayNameShort is not None:
data['displayNameShort'] = self.displayNameShort
if self.description is not None:
data['description'] = self.description
if self.aggregate is not None:
data['defaultAggregate'] = self.aggregate
if self.unit is not None:
data['unit'] = self.unit
if self.resolution is not None:
data['defaultResolutionMS'] = self.resolution
if self.isDisabled is not None:
data['isDisabled'] = True if self.isDisabled == 'yes' else False
if self.type is not None:
data['type'] = self.type
self.path = "v1/metrics/{0}".format(self.metricName)
self.data = json.dumps(data, sort_keys=True)
        self.headers = {'Content-Type': 'application/json', "Accept": "application/json"}
def iter(self, order='', sort=True):
"""Return a :class:`tableiter` object on this column."""
from casacore.tables import tableiter
        return tableiter(self._table, [self._column], order, sort)
def content(self):
"""
        Lazily returns the content of the FileNode. If possible, it tries to
        decode the content from UTF-8.
"""
content = self._get_content()
if bool(content and '\0' in content):
return content
        return safe_unicode(content)
def subscribe(self, objectID, varIDs=(tc.VAR_ROAD_ID, tc.VAR_LANEPOSITION), begin=0, end=2**31 - 1):
"""subscribe(string, list(integer), int, int) -> None
Subscribe to one or more object values for the given interval.
"""
        Domain.subscribe(self, objectID, varIDs, begin, end)
def apply_trend_constraint(self, limit, dt, distribution_skip=False,
**kwargs):
"""
Constrains change in RV to be less than limit over time dt.
Only works if ``dRV`` and ``Plong`` attributes are defined
for population.
:param limit:
Radial velocity limit on trend. Must be
:class:`astropy.units.Quantity` object, or
else interpreted as m/s.
:param dt:
Time baseline of RV observations. Must be
:class:`astropy.units.Quantity` object; else
interpreted as days.
:param distribution_skip:
This is by default ``True``. *To be honest, I'm not
exactly sure why. Might be important, might not
(don't remember).*
:param **kwargs:
Additional keyword arguments passed to
:func:`StarPopulation.apply_constraint`.
"""
if type(limit) != Quantity:
limit = limit * u.m/u.s
if type(dt) != Quantity:
dt = dt * u.day
dRVs = np.absolute(self.dRV(dt))
c1 = UpperLimit(dRVs, limit)
c2 = LowerLimit(self.Plong, dt*4)
self.apply_constraint(JointConstraintOr(c1,c2,name='RV monitoring',
Ps=self.Plong,dRVs=dRVs),
                              distribution_skip=distribution_skip, **kwargs)
def _set_get_media_detail(self, v, load=False):
"""
Setter method for get_media_detail, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_media_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_media_detail() directly.
YANG Description: This is a function that serves to return the media
    properties of all the interfaces of the managed entity.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_media_detail.get_media_detail, is_leaf=True, yang_name="get-media-detail", rest_name="get-media-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getmediaport-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_media_detail must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_media_detail.get_media_detail, is_leaf=True, yang_name="get-media-detail", rest_name="get-media-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getmediaport-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='rpc', is_config=True)""",
})
self.__get_media_detail = t
if hasattr(self, '_set'):
            self._set()
def read_file(file_path_name):
"""
Read the content of the specified file.
@param file_path_name: path and name of the file to read.
@return: content of the specified file.
"""
with io.open(os.path.join(os.path.dirname(__file__), file_path_name), mode='rt', encoding='utf-8') as fd:
        return fd.read()
def upload(client, source_dir):
"""Upload listing files in source_dir. folder herachy."""
print('')
print('upload store listings')
print('---------------------')
listings_folder = os.path.join(source_dir, 'listings')
langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
with open(os.path.join(language_dir, 'listing.json')) as listings_file:
listing = json.load(listings_file)
listing_response = client.update(
'listings', language=language, body=listing)
print(' Listing for language %s was updated.' %
              listing_response['language'])
def _generateForTokenSecurity(self,
username, password,
tokenUrl,
expiration=None,
client='requestip'):
""" generates a token for a feature service """
query_dict = {'username': username,
'password': password,
'expiration':str(_defaultTokenExpiration),
'client': client,
'f': 'json'}
if client == "referer":
query_dict['referer'] = self._referer_url
if expiration is not None:
query_dict['expiration'] = expiration
secHandler = None
if self.cookiejar is not None:
secHandler = self
if secHandler is not None:
secHandler._method = "HANDLER"
token = self._post(url=tokenUrl,
param_dict=query_dict,
securityHandler=secHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if self.cookiejar is not None:
if secHandler is not None:
secHandler._method = "TOKEN"
if 'error' in token:
self._token = None
self._token_created_on = None
self._token_expires_on = None
self._expires_in = None
return token
elif 'status' in token:
self._token = None
self._token_created_on = None
self._token_expires_on = None
self._expires_in = None
#print token['message']
return token
else:
self._token = token['token']
self._token_created_on = datetime.datetime.now()
self._token_expires_on = datetime.datetime.fromtimestamp(token['expires'] /1000) - \
datetime.timedelta(seconds=1)
self._expires_in = (self._token_expires_on - self._token_created_on).total_seconds()
            return token['token']
def _fluent_params(self, fluents, ordering) -> FluentParamsList:
'''Returns the instantiated `fluents` for the given `ordering`.
For each fluent in `fluents`, it instantiates each parameter
type w.r.t. the contents of the object table.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings.
'''
variables = []
for fluent_id in ordering:
fluent = fluents[fluent_id]
param_types = fluent.param_types
objects = ()
names = []
if param_types is None:
names = [fluent.name]
else:
objects = tuple(self.object_table[ptype]['objects'] for ptype in param_types)
for values in itertools.product(*objects):
values = ','.join(values)
var_name = '{}({})'.format(fluent.name, values)
names.append(var_name)
variables.append((fluent_id, names))
        return tuple(variables)
def hide(self):
"""Hide the window."""
self.tk.withdraw()
self._visible = False
if self._modal:
            self.tk.grab_release()
def data(self, index, role=Qt.DisplayRole):
"""return data depending on index, Qt::ItemDataRole and data type of the column.
Args:
index (QtCore.QModelIndex): Index to define column and row you want to return
role (Qt::ItemDataRole): Define which data you want to return.
Returns:
None if index is invalid
None if role is none of: DisplayRole, EditRole, CheckStateRole, DATAFRAME_ROLE
if role DisplayRole:
unmodified _dataFrame value if column dtype is object (string or unicode).
_dataFrame value as int or long if column dtype is in _intDtypes.
_dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).
None if column dtype is in _boolDtypes.
QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.
if role EditRole:
unmodified _dataFrame value if column dtype is object (string or unicode).
_dataFrame value as int or long if column dtype is in _intDtypes.
_dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).
_dataFrame value as bool if column dtype is in _boolDtypes.
QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.
if role CheckStateRole:
Qt.Checked or Qt.Unchecked if dtype is numpy.bool_ otherwise None for all other dtypes.
if role DATAFRAME_ROLE:
unmodified _dataFrame value.
raises TypeError if an unhandled dtype is found in column.
"""
if not index.isValid():
return None
def convertValue(row, col, columnDtype):
value = None
if columnDtype == object:
value = self._dataFrame.ix[row, col]
elif columnDtype in self._floatDtypes:
value = round(float(self._dataFrame.ix[row, col]), self._float_precisions[str(columnDtype)])
elif columnDtype in self._intDtypes:
value = int(self._dataFrame.ix[row, col])
elif columnDtype in self._boolDtypes:
# TODO this will most likely always be true
# See: http://stackoverflow.com/a/715455
# well no: I am mistaken here, the data is already in the dataframe
# so its already converted to a bool
value = bool(self._dataFrame.ix[row, col])
elif columnDtype in self._dateDtypes:
#print numpy.datetime64(self._dataFrame.ix[row, col])
value = pandas.Timestamp(self._dataFrame.ix[row, col])
value = QtCore.QDateTime.fromString(str(value), self.timestampFormat)
#print value
# else:
# raise TypeError, "returning unhandled data type"
return value
row = self._dataFrame.index[index.row()]
col = self._dataFrame.columns[index.column()]
columnDtype = self._dataFrame[col].dtype
if role == Qt.DisplayRole:
            # return the value if you want to show True/False as text
if columnDtype == numpy.bool:
result = self._dataFrame.ix[row, col]
else:
result = convertValue(row, col, columnDtype)
elif role == Qt.EditRole:
result = convertValue(row, col, columnDtype)
elif role == Qt.CheckStateRole:
if columnDtype == numpy.bool_:
if convertValue(row, col, columnDtype):
result = Qt.Checked
else:
result = Qt.Unchecked
else:
result = None
elif role == DATAFRAME_ROLE:
result = self._dataFrame.ix[row, col]
else:
result = None
        return result
def orchestrate_high(data, test=None, queue=False, pillar=None, **kwargs):
'''
Execute a single state orchestration routine
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run state.orchestrate_high '{
stage_one:
{salt.state: [{tgt: "db*"}, {sls: postgres_setup}]},
stage_two:
{salt.state: [{tgt: "web*"}, {sls: apache_setup}, {
require: [{salt: stage_one}],
}]},
}'
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.high'](
data,
        test=test,
        queue=queue,
pillar=pillar,
**kwargs)
ret = {minion.opts['id']: running}
__jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
    return ret
def to_dict(self, *, include_keys=None, exclude_keys=None, use_default_excludes=True):
"""Converts the class to a dictionary.
:include_keys: if not None, only the attrs given will be included.
:exclude_keys: if not None, all attrs except those listed will be included, with respect to
use_default_excludes.
:use_default_excludes: if True, then the class-level exclude_keys_serialize will be combined
        with exclude_keys if given, or used in place of exclude_keys if not given.
"""
data = self.__dict__
if include_keys:
return pick(data, include_keys, transform=self._other_to_dict)
else:
skeys = self.exclude_keys_serialize if use_default_excludes else None
ekeys = exclude_keys
return exclude(
data,
lambda k: (skeys is not None and k in skeys) or (ekeys is not None and k in ekeys),
                transform=self._other_to_dict)
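# Illustrative behaviour of to_dict() (attribute names hypothetical): assuming
# the class defines exclude_keys_serialize = {'secret'} and the instance has
# attributes name, debug and secret:
#
#     obj.to_dict(include_keys={'name'})                               # only 'name'
#     obj.to_dict(exclude_keys={'debug'})                              # drops 'debug' and 'secret'
#     obj.to_dict(exclude_keys={'debug'}, use_default_excludes=False)  # drops only 'debug'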
def compute(cls, observation, prediction, key=None):
"""Compute a ratio from an observation and a prediction."""
assert isinstance(observation, (dict, float, int, pq.Quantity))
assert isinstance(prediction, (dict, float, int, pq.Quantity))
obs, pred = cls.extract_means_or_values(observation, prediction,
key=key)
value = pred / obs
value = utils.assert_dimensionless(value)
        return RatioScore(value)
def _projection_to_paths(cls, root_key, projection):
"""
Expand a $sub/$sub. projection to a single projection of True (if
inclusive) or a map of full paths (e.g `employee.company.tel`).
"""
# Referenced projections are handled separately so just flag the
# reference field to true.
if '$ref' in projection:
return True
inclusive = True
sub_projection = {}
for key, value in projection.items():
if key in ['$sub', '$sub.']:
continue
if key.startswith('$'):
sub_projection[root_key] = {key: value}
inclusive = False
continue
sub_key = root_key + '.' + key
if isinstance(value, dict):
sub_value = cls._projection_to_paths(sub_key, value)
if isinstance(sub_value, dict):
sub_projection.update(sub_value)
else:
sub_projection[sub_key] = True
else:
sub_projection[sub_key] = True
inclusive = False
if inclusive:
# No specific keys so this is inclusive
return True
        return sub_projection
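# Worked example of the expansion described above (keys are illustrative; the
# method is a classmethod, so it is called on the class):
#
#     _projection_to_paths('employee.company', {'name': True, 'tel': True})
#     # -> {'employee.company.name': True, 'employee.company.tel': True}
#
#     _projection_to_paths('employee.company', {'$ref': True})
#     # -> True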
def _process_priv_part(perms):
'''
Process part
'''
_tmp = {}
previous = None
for perm in perms:
if previous is None:
_tmp[_PRIVILEGES_MAP[perm]] = False
previous = _PRIVILEGES_MAP[perm]
else:
if perm == '*':
_tmp[previous] = True
else:
_tmp[_PRIVILEGES_MAP[perm]] = False
previous = _PRIVILEGES_MAP[perm]
    return _tmp
def get_lib_volume_mounts(base_lib_name, assembled_specs):
""" Returns a list of the formatted volume specs for a lib"""
volumes = [_get_lib_repo_volume_mount(assembled_specs['libs'][base_lib_name])]
volumes.append(get_command_files_volume_mount(base_lib_name, test=True))
for lib_name in assembled_specs['libs'][base_lib_name]['depends']['libs']:
lib_spec = assembled_specs['libs'][lib_name]
volumes.append(_get_lib_repo_volume_mount(lib_spec))
    return volumes
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code."""
try: lines, lnum = findsource(object)
except IOError: return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and string.strip(lines[start]) in ['', '#']:
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(string.expandtabs(lines[end]))
end = end + 1
return string.join(comments, '')
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [string.lstrip(string.expandtabs(lines[end]))]
if end > 0:
end = end - 1
comment = string.lstrip(string.expandtabs(lines[end]))
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = string.lstrip(string.expandtabs(lines[end]))
while comments and string.strip(comments[0]) == '#':
comments[:1] = []
while comments and string.strip(comments[-1]) == '#':
comments[-1:] = []
            return string.join(comments, '')
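# Usage sketch (Python 2 era, matching the string-module calls above): for a
# function defined in a module file and preceded by a comment block at the same
# indentation,
#
#     # adds one and one
#     def f():
#         return 2
#
#     getcomments(f)   # -> '# adds one and one\n'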
def on_train_begin(self, **kwargs):
"Create the optimizers for the generator and critic if necessary, initialize smootheners."
if not getattr(self,'opt_gen',None):
self.opt_gen = self.opt.new([nn.Sequential(*flatten_model(self.generator))])
else: self.opt_gen.lr,self.opt_gen.wd = self.opt.lr,self.opt.wd
if not getattr(self,'opt_critic',None):
self.opt_critic = self.opt.new([nn.Sequential(*flatten_model(self.critic))])
else: self.opt_critic.lr,self.opt_critic.wd = self.opt.lr,self.opt.wd
self.gen_mode = self.gen_first
self.switch(self.gen_mode)
self.closses,self.glosses = [],[]
self.smoothenerG,self.smoothenerC = SmoothenValue(self.beta),SmoothenValue(self.beta)
#self.recorder.no_val=True
self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
        self.imgs,self.titles = [],[]
def is_downloaded(self, file_path):
"""
Check if the data file is already downloaded.
"""
if os.path.exists(file_path):
self.chatbot.logger.info('File is already downloaded')
return True
        return False
def genlet(generator_function=None, prime=True):
"""
Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink`
:param generator_function: the generator function to convert
:type generator_function: generator
:param prime: advance the generator to the next/first yield
:type prime: bool
When used as a decorator, this function can also be called with and without keywords.
.. code:: python
@genlet
def pingpong():
"Chainlet that passes on its value"
last = yield
while True:
last = yield last
@genlet(prime=True)
def produce():
"Chainlet that produces a value"
while True:
yield time.time()
@genlet(True)
def read(iterable):
"Chainlet that reads from an iterable"
for item in iterable:
yield item
"""
if generator_function is None:
return GeneratorLink.wraplet(prime=prime)
elif not callable(generator_function):
return GeneratorLink.wraplet(prime=generator_function)
    return GeneratorLink.wraplet(prime=prime)(generator_function)
def add(self, item, position=5):
"""Add an item to the list unless it is already present.
If the item is an expression, then a semicolon will be appended to it
in the final compiled code.
"""
if item in self.items:
return
self.items[item] = position
self._add_dep(item)
self.order = None
        self.changed(code_changed=True)
def set_jinja2_silent_none(config): # pragma: no cover
""" if variable is None print '' instead of 'None'
"""
config.commit()
jinja2_env = config.get_jinja2_environment()
    jinja2_env.finalize = _silent_none
def _parse_ignores(self):
""" Parse the ignores setting from the pylintrc file if available. """
error_message = (
colorama.Fore.RED
+ "{} does not appear to be a valid pylintrc file".format(self.rcfile)
+ colorama.Fore.RESET
)
if not os.path.isfile(self.rcfile):
if not self._is_using_default_rcfile():
print(error_message)
sys.exit(1)
else:
return
config = configparser.ConfigParser()
try:
config.read(self.rcfile)
except configparser.MissingSectionHeaderError:
print(error_message)
sys.exit(1)
if config.has_section("MASTER") and config.get("MASTER", "ignore"):
self.ignore_folders += config.get("MASTER", "ignore").split(",") | Parse the ignores setting from the pylintrc file if available. |
def t_stringdollar_rbrace(self, t):
r'\}'
t.lexer.braces -= 1
if t.lexer.braces == 0:
# End of the dollar brace, back to the rest of the string
        t.lexer.begin('string') | r'\}'
def perform_update(self, serializer):
"""creates a record in the `bulbs.promotion.PZoneHistory`
:param obj: the instance saved
:param created: boolean expressing if the object was newly created (`False` if updated)
"""
instance = serializer.save()
# create history object
instance.history.create(data=instance.data) | creates a record in the `bulbs.promotion.PZoneHistory`
:param obj: the instance saved
:param created: boolean expressing if the object was newly created (`False` if updated) |
def _compute_attenuation(self, rup, dists, imt, C):
"""
Compute the second term of the equation described on p. 1866:
" [(c4 + c5 * M) * min{ log10(R), log10(70.) }] +
[(c4 + c5 * M) * max{ min{ log10(R/70.), log10(140./70.) }, 0.}] +
[(c8 + c9 * M) * max{ log10(R/140.), 0}] "
"""
vec = np.ones(len(dists.rrup))
a1 = (np.log10(np.sqrt(dists.rrup ** 2.0 + C['c11'] ** 2.0)),
np.log10(70. * vec))
a = np.column_stack([a1[0], a1[1]])
b3 = (np.log10(np.sqrt(dists.rrup ** 2.0 + C['c11'] ** 2.0) /
(70. * vec)),
np.log10((140. / 70.) * vec))
b2 = np.column_stack([b3[0], b3[1]])
b1 = ([np.min(b2, axis=1), 0. * vec])
b = np.column_stack([b1[0], b1[1]])
c1 = (np.log10(np.sqrt(dists.rrup ** 2.0 + C['c11'] ** 2.0) /
(140.) * vec), 0. * vec)
c = np.column_stack([c1[0], c1[1]])
return (((C['c4'] + C['c5'] * rup.mag) * np.min(a, axis=1)) +
((C['c6'] + C['c7'] * rup.mag) * np.max(b, axis=1)) +
((C['c8'] + C['c9'] * rup.mag) * np.max(c, axis=1))) | Compute the second term of the equation described on p. 1866:
" [(c4 + c5 * M) * min{ log10(R), log10(70.) }] +
[(c4 + c5 * M) * max{ min{ log10(R/70.), log10(140./70.) }, 0.}] +
[(c8 + c9 * M) * max{ log10(R/140.), 0}] " |
def at(self, instant):
"""Iterates (in chronological order) over all events that are occuring during `instant`.
Args:
instant (Arrow object)
"""
for event in self:
if event.begin <= instant <= event.end:
                yield event | Iterates (in chronological order) over all events that are occurring during `instant`.
Args:
instant (Arrow object) |
def show_hide(self, *args):
"""Toggles the main window visibility
"""
log.debug("Show_hide called")
if self.forceHide:
self.forceHide = False
return
if not HidePrevention(self.window).may_hide():
return
if not self.win_prepare():
return
if not self.window.get_property('visible'):
log.info("Showing the terminal")
self.show()
self.set_terminal_focus()
return
# Disable the focus_if_open feature
# - if doesn't work seamlessly on all system
# - self.window.window.get_state doesn't provides us the right information on all
# systems, especially on MATE/XFCE
#
# if self.client.get_bool(KEY('/general/focus_if_open')):
# restore_focus = False
# if self.window.window:
# state = int(self.window.window.get_state())
# if ((state & GDK_WINDOW_STATE_STICKY or
# state & GDK_WINDOW_STATE_WITHDRAWN
# )):
# restore_focus = True
# else:
# restore_focus = True
# if not self.hidden:
# restore_focus = True
# if restore_focus:
# log.debug("DBG: Restoring the focus to the terminal")
# self.hide()
# self.show()
# self.window.window.focus()
# self.set_terminal_focus()
# return
log.info("Hiding the terminal")
self.hide() | Toggles the main window visibility |
def bank_account_number(self):
"""Return the IBAN's Bank Account Number."""
start = get_iban_spec(self.country_code).bban_split_pos + 4
return self._id[start:] | Return the IBAN's Bank Account Number. |
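
For orientation, a hedged sketch of how the slicing above decomposes an IBAN: two characters of country code, two check digits, then the BBAN. The sample IBAN below is the standard documentation example, not a value taken from this library.

# Illustrative only: IBAN = country code (2) + check digits (2) + BBAN.
iban = "DE89370400440532013000"   # well-known example IBAN
country_code = iban[:2]           # "DE"
check_digits = iban[2:4]          # "89"
bban = iban[4:]                   # bank code + account number
print(country_code, check_digits, bban)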
def stream_file(self, url, folder=None, filename=None, overwrite=False):
# type: (str, Optional[str], Optional[str], bool) -> str
"""Stream file from url and store in provided folder or temporary folder if no folder supplied.
Must call setup method first.
Args:
url (str): URL to download
filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).
folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).
overwrite (bool): Whether to overwrite existing file. Defaults to False.
Returns:
str: Path of downloaded file
"""
path = self.get_path_for_url(url, folder, filename, overwrite)
f = None
try:
f = open(path, 'wb')
for chunk in self.response.iter_content(chunk_size=10240):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return f.name
except Exception as e:
raisefrom(DownloadError, 'Download of %s failed in retrieval of stream!' % url, e)
finally:
if f:
f.close() | Stream file from url and store in provided folder or temporary folder if no folder supplied.
Must call setup method first.
Args:
url (str): URL to download
filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).
folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).
overwrite (bool): Whether to overwrite existing file. Defaults to False.
Returns:
str: Path of downloaded file |
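
A minimal standalone sketch of the same chunked-streaming pattern using requests directly; the URL, timeout, and chunk size are placeholder assumptions, and the helper above additionally resolves the target path and reuses its own configured session.

import requests

def stream_to_file(url, path, chunk_size=10240):
    # Write the response body to disk in fixed-size chunks so large files
    # never need to fit in memory.
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
    return path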
def get_property(obj, name):
"""
Recursively gets value of object or its subobjects property specified by its name.
The object can be a user defined object, map or array.
The property name correspondently must be object property, map key or array index.
:param obj: an object to read property from.
:param name: a name of the property to get.
:return: the property value or null if property doesn't exist or introspection failed.
"""
if obj == None or name == None:
return None
names = name.split(".")
if names == None or len(names) == 0:
return None
return RecursiveObjectReader._perform_get_property(obj, names, 0) | Recursively gets value of object or its subobjects property specified by its name.
The object can be a user defined object, map or array.
The property name correspondently must be object property, map key or array index.
:param obj: an object to read property from.
:param name: a name of the property to get.
:return: the property value or null if property doesn't exist or introspection failed. |
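
A minimal standalone sketch of the dotted-path lookup idea (not the library's actual RecursiveObjectReader internals): each segment of the name is resolved as a dict key, list index, or attribute in turn.

def get_by_path(obj, path):
    # Resolve "a.b.0.c"-style paths against nested dicts, lists and objects.
    for name in path.split("."):
        if obj is None:
            return None
        if isinstance(obj, dict):
            obj = obj.get(name)
        elif isinstance(obj, (list, tuple)) and name.isdigit():
            index = int(name)
            obj = obj[index] if index < len(obj) else None
        else:
            obj = getattr(obj, name, None)
    return obj

print(get_by_path({"user": {"emails": ["a@x.io", "b@x.io"]}}, "user.emails.1"))  # b@x.io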
def finish(
self,
width=1,
color=None,
fill=None,
roundCap=False,
dashes=None,
even_odd=False,
morph=None,
closePath=True
):
"""Finish the current drawing segment.
Notes:
Apply stroke and fill colors, dashes, line style and width, or
morphing. Also determines whether any open path should be closed
by a connecting line to its start point.
"""
if self.draw_cont == "": # treat empty contents as no-op
return
color_str = ColorCode(color, "c") # ensure proper color string
fill_str = ColorCode(fill, "f") # ensure proper fill string
if width != 1:
self.draw_cont += "%g w\n" % width
if roundCap:
self.draw_cont += "%i J %i j\n" % (roundCap, roundCap)
if dashes is not None and len(dashes) > 0:
self.draw_cont += "%s d\n" % dashes
if closePath:
self.draw_cont += "h\n"
self.lastPoint = None
if color is not None:
self.draw_cont += color_str
if fill is not None:
self.draw_cont += fill_str
if not even_odd:
self.draw_cont += "B\n"
else:
self.draw_cont += "B*\n"
else:
self.draw_cont += "S\n"
if CheckMorph(morph):
m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x,
self.height - morph[0].y - self.y)
mat = ~m1 * morph[1] * m1
self.draw_cont = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat) + self.draw_cont
self.totalcont += "\nq\n" + self.draw_cont + "Q\n"
self.draw_cont = ""
self.lastPoint = None
return | Finish the current drawing segment.
Notes:
Apply stroke and fill colors, dashes, line style and width, or
morphing. Also determines whether any open path should be closed
by a connecting line to its start point. |
def write_flows_to_gssha_time_series_xys(self,
path_to_output_file,
series_name,
series_id,
river_index=None,
river_id=None,
date_search_start=None,
date_search_end=None,
daily=False,
filter_mode="mean"):
"""
Write out RAPID output to GSSHA WMS time series xys file.
Parameters
----------
path_to_output_file: str
Path to the output xys file.
series_name: str
The name for the series.
series_id: int
The ID to give the series.
river_index: :obj:`datetime.datetime`, optional
This is the index of the river in the file you want the
streamflow for.
river_id: :obj:`datetime.datetime`, optional
This is the river ID that you want the streamflow for.
date_search_start: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the minimum date for
starting.
date_search_end: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the maximum date for
ending.
daily: bool, optional
If True and the file is CF-Compliant, write out daily flows.
filter_mode: str, optional
You can get the daily average "mean" or the maximum "max".
            Default is "mean".
Example writing entire time series to file:
.. code:: python
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_{0}.xys'.format(river_id),
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_id=river_id)
Example writing entire time series as daily average to file:
.. code:: python
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# NOTE: Getting the river index is not necessary
# this is just an example of how to use this
river_index = qout_nc.get_river_index(river_id)
# if file is CF compliant, you can write out daily average
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_daily.xys',
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_index=river_index,
daily=True)
Example writing subset of time series as daily maximum to file:
.. code:: python
from datetime import datetime
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# NOTE: Getting the river index is not necessary
# this is just an example of how to use this
river_index = qout_nc.get_river_index(river_id)
# if file is CF compliant, you can filter by date and
# get daily values
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_daily_date_filter.xys',
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_index=river_index,
date_search_start=datetime(2002, 8, 31),
date_search_end=datetime(2002, 9, 15),
daily=True,
filter_mode="max")
"""
if river_id is not None:
river_index = self.get_river_index(river_id)
elif river_id is None and river_index is None:
raise ValueError(" Need reach id or reach index ...")
self.raise_time_valid()
# analyze and write
qout_df = self.get_qout_index(river_index,
date_search_start=date_search_start,
date_search_end=date_search_end,
daily=daily,
filter_mode=filter_mode,
as_dataframe=True)
with open_csv(path_to_output_file, 'w') as out_ts:
out_ts.write("XYS {0} {1} \"{2}\"\r\n".format(series_id,
len(qout_df.index),
series_name))
for index, pd_row in qout_df.iterrows():
date_str = index.strftime("%m/%d/%Y %I:%M:%S %p")
out_ts.write("\"{0}\" {1:.5f}\n".format(date_str,
pd_row[0])) | Write out RAPID output to GSSHA WMS time series xys file.
Parameters
----------
path_to_output_file: str
Path to the output xys file.
series_name: str
The name for the series.
series_id: int
The ID to give the series.
river_index: :obj:`datetime.datetime`, optional
This is the index of the river in the file you want the
streamflow for.
river_id: :obj:`datetime.datetime`, optional
This is the river ID that you want the streamflow for.
date_search_start: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the minimum date for
starting.
date_search_end: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the maximum date for
ending.
daily: bool, optional
If True and the file is CF-Compliant, write out daily flows.
filter_mode: str, optional
You can get the daily average "mean" or the maximum "max".
    Default is "mean".
Example writing entire time series to file:
.. code:: python
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_{0}.xys'.format(river_id),
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_id=river_id)
Example writing entire time series as daily average to file:
.. code:: python
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# NOTE: Getting the river index is not necessary
# this is just an example of how to use this
river_index = qout_nc.get_river_index(river_id)
# if file is CF compliant, you can write out daily average
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_daily.xys',
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_index=river_index,
daily=True)
Example writing subset of time series as daily maximum to file:
.. code:: python
from datetime import datetime
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# NOTE: Getting the river index is not necessary
# this is just an example of how to use this
river_index = qout_nc.get_river_index(river_id)
# if file is CF compliant, you can filter by date and
# get daily values
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_daily_date_filter.xys',
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_index=river_index,
date_search_start=datetime(2002, 8, 31),
date_search_end=datetime(2002, 9, 15),
daily=True,
filter_mode="max") |
def authorize(self, me, state=None, next_url=None, scope='read'):
"""Authorize a user via Micropub.
Args:
me (string): the authing user's URL. if it does not begin with
https?://, http:// will be prepended.
state (string, optional): passed through the whole auth process,
useful if you want to maintain some state, e.g. the starting page
to return to when auth is complete.
next_url (string, optional): deprecated and replaced by the more
general "state". still here for backward compatibility.
scope (string, optional): a space-separated string of micropub
scopes. 'read' by default.
Returns:
a redirect to the user's specified authorization
https://indieauth.com/auth if none is provided.
"""
redirect_url = flask.url_for(
self.flask_endpoint_for_function(self._authorized_handler),
_external=True)
return self._start_indieauth(
me, redirect_url, state or next_url, scope) | Authorize a user via Micropub.
Args:
me (string): the authing user's URL. if it does not begin with
https?://, http:// will be prepended.
state (string, optional): passed through the whole auth process,
useful if you want to maintain some state, e.g. the starting page
to return to when auth is complete.
next_url (string, optional): deprecated and replaced by the more
general "state". still here for backward compatibility.
scope (string, optional): a space-separated string of micropub
scopes. 'read' by default.
Returns:
a redirect to the user's specified authorization
https://indieauth.com/auth if none is provided. |
def get_notify_observers_kwargs(self):
""" Return the mapping between the metrics call and the iterated
variables.
Return
----------
notify_observers_kwargs: dict,
the mapping between the iterated variables.
"""
return {
'u_new': self._u_new,
'x_new': self._x_new,
'y_new': self._y_new,
'z_new': self._z,
'xi': self._xi,
'sigma': self._sigma,
't': self._t_new,
'idx': self.idx,
} | Return the mapping between the metrics call and the iterated
variables.
Return
----------
notify_observers_kwargs: dict,
the mapping between the iterated variables. |
def cmp(self,junc,tolerance=0):
""" output comparison and allow for tolerance if desired
* -1 if junc comes before self
* 1 if junc comes after self
* 0 if overlaps
* 2 if else
:param junc:
:param tolerance: optional search space (default=0, no tolerance)
:type junc: Junction
:type tolerance: int
:return: value of comparison
:rtype: int
"""
if self.overlaps(junc,tolerance):
return 0 #equal
if self.left.chr == junc.right.chr:
if self.left.start > junc.right.start:
return -1 #comes before
if self.right.chr == junc.left.chr:
if self.right.start < junc.right.start:
return 1 #comes after
return 2 | output comparison and allow for tolerance if desired
* -1 if junc comes before self
* 1 if junc comes after self
* 0 if overlaps
* 2 if else
:param junc:
:param tolerance: optional search space (default=0, no tolerance)
:type junc: Junction
:type tolerance: int
:return: value of comparison
:rtype: int |
def fuzzybreaks(scale, breaks=None, boundary=None,
binwidth=None, bins=30, right=True):
"""
Compute fuzzy breaks
For a continuous scale, fuzzybreaks "preserve" the range of
the scale. The fuzzing is close to numerical roundoff and
is visually imperceptible.
Parameters
----------
scale : scale
Scale
breaks : array_like
Sequence of break points. If provided and the scale is not
discrete, they are returned.
boundary : float
        First break. If `None` a suitable one is computed using
the range of the scale and the binwidth.
binwidth : float
Separation between the breaks
bins : int
Number of bins
right : bool
If `True` the right edges of the bins are part of the
bin. If `False` then the left edges of the bins are part
of the bin.
Returns
-------
out : array_like
"""
# Bins for categorical data should take the width
# of one level, and should show up centered over
# their tick marks. All other parameters are ignored.
if isinstance(scale, scale_discrete):
breaks = scale.get_breaks()
return -0.5 + np.arange(1, len(breaks)+2)
else:
if breaks is not None:
breaks = scale.transform(breaks)
if breaks is not None:
return breaks
recompute_bins = binwidth is not None
srange = scale.limits
if binwidth is None or np.isnan(binwidth):
binwidth = (srange[1]-srange[0]) / bins
if boundary is None or np.isnan(boundary):
boundary = round_any(srange[0], binwidth, np.floor)
if recompute_bins:
bins = np.int(np.ceil((srange[1]-boundary)/binwidth))
# To minimise precision errors, we do not pass the boundary and
# binwidth into np.arange as params. The resulting breaks
# can then be adjusted with finer(epsilon based rather than
# some arbitrary small number) precision.
breaks = np.arange(boundary, srange[1]+binwidth, binwidth)
return _adjust_breaks(breaks, right) | Compute fuzzy breaks
For a continuous scale, fuzzybreaks "preserve" the range of
the scale. The fuzzing is close to numerical roundoff and
is visually imperceptible.
Parameters
----------
scale : scale
Scale
breaks : array_like
Sequence of break points. If provided and the scale is not
discrete, they are returned.
boundary : float
    First break. If `None` a suitable one is computed using
the range of the scale and the binwidth.
binwidth : float
Separation between the breaks
bins : int
Number of bins
right : bool
If `True` the right edges of the bins are part of the
bin. If `False` then the left edges of the bins are part
of the bin.
Returns
-------
out : array_like |
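
A standalone numpy sketch of the core break computation (range, binwidth, boundary) without the fuzzing/adjustment step; the limits and bin count are made-up values.

import numpy as np

def simple_breaks(limits, bins=30, binwidth=None, boundary=None):
    lo, hi = limits
    if binwidth is None:
        binwidth = (hi - lo) / bins
    if boundary is None:
        # round the lower limit down to a multiple of the binwidth
        boundary = np.floor(lo / binwidth) * binwidth
    return np.arange(boundary, hi + binwidth, binwidth)

print(simple_breaks((0.3, 9.7), bins=5))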
def from_pypirc(pypi_repository):
""" Load configuration from .pypirc file, cached to only run once """
ret = {}
pypirc_locations = PYPIRC_LOCATIONS
for pypirc_path in pypirc_locations:
pypirc_path = os.path.expanduser(pypirc_path)
if os.path.isfile(pypirc_path):
parser = configparser.SafeConfigParser()
parser.read(pypirc_path)
if 'distutils' not in parser.sections():
continue
if 'index-servers' not in parser.options('distutils'):
continue
if pypi_repository not in parser.get('distutils', 'index-servers'):
continue
if pypi_repository in parser.sections():
for option in parser.options(pypi_repository):
ret[option] = parser.get(pypi_repository, option)
if not ret:
raise ConfigError(
'repository does not appear to be configured in pypirc ({})'.format(pypi_repository) +
', remember that it needs an entry in [distutils] and its own section'
)
return ret | Load configuration from .pypirc file, cached to only run once |
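
For reference, a hedged sketch of the .pypirc layout the parser above expects; the repository name and credentials below are made-up placeholders.

import configparser

PYPIRC_EXAMPLE = """
[distutils]
index-servers =
    internal

[internal]
repository = https://pypi.example.com/simple/
username = deploy
password = not-a-real-secret
"""

parser = configparser.ConfigParser()
parser.read_string(PYPIRC_EXAMPLE)
assert "internal" in parser.get("distutils", "index-servers")
print(dict(parser.items("internal")))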
def set_gae_attributes(span):
"""Set the GAE environment common attributes."""
for env_var, attribute_key in GAE_ATTRIBUTES.items():
attribute_value = os.environ.get(env_var)
if attribute_value is not None:
pair = {attribute_key: attribute_value}
pair_attrs = Attributes(pair)\
.format_attributes_json()\
.get('attributeMap')
_update_attr_map(span, pair_attrs) | Set the GAE environment common attributes. |
def get_render(name, data, trans='en'):
"""
Render string based on template
:param name: -- full template name
:type name: str,unicode
:param data: -- dict of rendered vars
:type data: dict
:param trans: -- translation for render. Default 'en'.
:type trans: str,unicode
:return: -- rendered string
:rtype: str,unicode
"""
translation.activate(trans)
config = loader.get_template(name)
result = config.render(data).replace('\r', '')
translation.deactivate()
return result | Render string based on template
:param name: -- full template name
:type name: str,unicode
:param data: -- dict of rendered vars
:type data: dict
:param trans: -- translation for render. Default 'en'.
:type trans: str,unicode
:return: -- rendered string
:rtype: str,unicode |
def findall(self, string):
""" Parse string, returning all outputs as parsed by functions
"""
output = []
for match in self.pattern.findall(string):
if hasattr(match, 'strip'):
match = [match]
self._list_add(output, self.run(match))
return output | Parse string, returning all outputs as parsed by functions |
def _finishSphering(self):
"""
Compute normalization constants for each feature dimension
based on the collected training samples. Then normalize our
training samples using these constants (so that each input
dimension has mean and variance of zero and one, respectively.)
Then feed these "sphered" training samples into the underlying
SVM model.
"""
# If we are sphering our data, we need to compute the
# per-dimension normalization constants
# First normalize the means (to zero)
self._normOffset = self._samples.mean(axis=0) * -1.0
self._samples += self._normOffset
# Now normalize the variances (to one). However, we need to be
# careful because the variance could conceivably be zero for one
# or more dimensions.
variance = self._samples.var(axis=0)
variance[numpy.where(variance == 0.0)] = 1.0
self._normScale = 1.0 / numpy.sqrt(variance)
self._samples *= self._normScale
# Now feed each "sphered" sample into the SVM library
for sampleIndex in range(len(self._labels)):
self._knn.learn(self._samples[sampleIndex],
self._labels[sampleIndex],
self._partitions[sampleIndex]) | Compute normalization constants for each feature dimension
based on the collected training samples. Then normalize our
training samples using these constants (so that each input
dimension has mean and variance of zero and one, respectively.)
Then feed these "sphered" training samples into the underlying
SVM model. |
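
A standalone numpy sketch of the same zero-mean / unit-variance "sphering" step, including the guard against zero-variance dimensions; the toy array is made up, and the real method feeds the result into its k-NN model.

import numpy as np

samples = np.array([[1.0, 10.0, 5.0],
                    [3.0, 10.0, 7.0],
                    [5.0, 10.0, 9.0]])   # toy data; the middle column has zero variance

offset = -samples.mean(axis=0)           # shift each dimension to zero mean
samples = samples + offset

variance = samples.var(axis=0)
variance[variance == 0.0] = 1.0          # avoid dividing by zero
scale = 1.0 / np.sqrt(variance)
samples = samples * scale                # unit variance where the data allows it

print(samples.mean(axis=0), samples.var(axis=0))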
def layout(self, slide):
""" Return layout information for slide """
image = Image.new('RGB', (WIDTH, HEIGHT), 'black')
draw = ImageDraw.Draw(image)
draw.font = self.font
self.vertical_layout(draw, slide)
self.horizontal_layout(draw, slide)
return slide | Return layout information for slide |
def main():
"""Main part of the download script."""
# Read config file. This has to get updated via git
project_root = utils.get_project_root()
infofile = os.path.join(project_root, "raw-datasets/info.yml")
logging.info("Read '%s'...", infofile)
with open(infofile, 'r') as ymlfile:
datasets = yaml.load(ymlfile)
for dataset in datasets:
local_path_file = os.path.join(project_root, dataset['online_path'])
i = 0
while not is_file_consistent(local_path_file, dataset['md5']) and i < 3:
if os.path.isfile(local_path_file):
local_file_size = os.path.getsize(local_path_file)
logging.info("MD5 codes differ. ")
logging.info("The file size of the downloaded file is %s.",
utils.sizeof_fmt(local_file_size))
logging.info("Download the file '%s'...", dataset['online_path'])
urllib.urlretrieve(dataset['url'], local_path_file)
i += 1
if i < 10:
logging.info("Found '%s'.", dataset['online_path']) | Main part of the download script. |
def config():
'''
Shows the current configuration.
'''
config = get_config()
print('Client version: {0}'.format(click.style(__version__, bold=True)))
print('API endpoint: {0}'.format(click.style(str(config.endpoint), bold=True)))
print('API version: {0}'.format(click.style(config.version, bold=True)))
print('Access key: "{0}"'.format(click.style(config.access_key, bold=True)))
masked_skey = config.secret_key[:6] + ('*' * 24) + config.secret_key[-10:]
print('Secret key: "{0}"'.format(click.style(masked_skey, bold=True)))
print('Signature hash type: {0}'.format(
click.style(config.hash_type, bold=True)))
print('Skip SSL certificate validation? {0}'.format(
click.style(str(config.skip_sslcert_validation), bold=True))) | Shows the current configuration. |
async def retrieve(self, url, **kwargs):
"""Issue API requests."""
try:
async with self.websession.request('GET', url, **kwargs) as res:
if res.status != 200:
raise Exception("Could not retrieve information from API")
if res.content_type == 'application/json':
return await res.json()
return await res.text()
except aiohttp.ClientError as err:
logging.error(err) | Issue API requests. |
def parse_variable(self, variable):
"""Method to parse an input or output variable.
**Example Variable**::
#App:1234:output!String
Args:
variable (string): The variable name to parse.
Returns:
(dictionary): Result of parsed string.
"""
data = None
if variable is not None:
variable = variable.strip()
if re.match(self._variable_match, variable):
var = re.search(self._variable_parse, variable)
data = {
'root': var.group(0),
'job_id': var.group(2),
'name': var.group(3),
'type': var.group(4),
}
return data | Method to parse an input or output variable.
**Example Variable**::
#App:1234:output!String
Args:
variable (string): The variable name to parse.
Returns:
(dictionary): Result of parsed string. |
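
A standalone sketch of parsing the #App:1234:output!String format with a hypothetical regex; the real _variable_match / _variable_parse patterns are defined elsewhere in the class and may differ.

import re

# Hypothetical pattern for "#App:<job_id>:<name>!<type>" variables.
VARIABLE_RE = re.compile(r"^#(App):(\d+):([A-Za-z0-9_.-]+)!([A-Za-z]+)$")

def parse_variable(variable):
    match = VARIABLE_RE.match(variable.strip())
    if not match:
        return None
    return {
        "root": match.group(0),
        "job_id": match.group(2),
        "name": match.group(3),
        "type": match.group(4),
    }

print(parse_variable("#App:1234:output!String"))
# {'root': '#App:1234:output!String', 'job_id': '1234', 'name': 'output', 'type': 'String'}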
def lowercase_to_camelcase(python_input, camelcase_input=None):
'''
a function to recursively convert data with lowercase key names into camelcase keys
    :param python_input: list or dictionary with lowercase keys
    :param camelcase_input: [optional] list or dictionary with default camelcase keys in output
:return: dictionary with camelcase key names
'''
if camelcase_input:
if python_input.__class__ != camelcase_input.__class__:
raise ValueError('camelcase_input type %s does not match python_input type %s' % (camelcase_input.__class__, python_input.__class__))
if isinstance(python_input, dict):
return _to_camelcase_dict(python_input, camelcase_input)
elif isinstance(python_input, list):
return _ingest_list(python_input, _to_camelcase_dict, camelcase_input)
else:
return python_input | a function to recursively convert data with lowercase key names into camelcase keys
    :param python_input: list or dictionary with lowercase keys
    :param camelcase_input: [optional] list or dictionary with default camelcase keys in output
:return: dictionary with camelcase key names |
def _is_number_match_OO(numobj1_in, numobj2_in):
"""Takes two phone number objects and compares them for equality."""
# We only care about the fields that uniquely define a number, so we copy these across explicitly.
numobj1 = _copy_core_fields_only(numobj1_in)
numobj2 = _copy_core_fields_only(numobj2_in)
# Early exit if both had extensions and these are different.
if (numobj1.extension is not None and
numobj2.extension is not None and
numobj1.extension != numobj2.extension):
return MatchType.NO_MATCH
country_code1 = numobj1.country_code
country_code2 = numobj2.country_code
# Both had country_code specified.
if country_code1 != 0 and country_code2 != 0:
if numobj1 == numobj2:
return MatchType.EXACT_MATCH
elif (country_code1 == country_code2 and
_is_national_number_suffix_of_other(numobj1, numobj2)):
# A SHORT_NSN_MATCH occurs if there is a difference because of the
# presence or absence of an 'Italian leading zero', the presence
# or absence of an extension, or one NSN being a shorter variant
# of the other.
return MatchType.SHORT_NSN_MATCH
# This is not a match.
return MatchType.NO_MATCH
# Checks cases where one or both country_code fields were not
# specified. To make equality checks easier, we first set the country_code
# fields to be equal.
numobj1.country_code = country_code2
# If all else was the same, then this is an NSN_MATCH.
if numobj1 == numobj2:
return MatchType.NSN_MATCH
if _is_national_number_suffix_of_other(numobj1, numobj2):
return MatchType.SHORT_NSN_MATCH
return MatchType.NO_MATCH | Takes two phone number objects and compares them for equality. |
def _get_temperature(self, data):
'''Return temperature in celsius'''
temp = (data[2] & ~(1 << 7)) + (data[3] / 100)
sign = (data[2] >> 7) & 1
if sign == 0:
return round(temp, 2)
return round(-1 * temp, 2) | Return temperature in celsius |
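
A standalone sketch of the sign-magnitude decoding used above: bit 7 of the integer byte carries the sign, the remaining bits the whole degrees, and the next byte holds hundredths; the byte values are made up.

def decode_temperature(int_byte, frac_byte):
    # bits 0-6: integer part, bit 7: sign flag, frac_byte: hundredths of a degree
    magnitude = (int_byte & 0x7F) + frac_byte / 100
    sign = -1 if (int_byte >> 7) & 1 else 1
    return round(sign * magnitude, 2)

print(decode_temperature(0x16, 25))         # 22.25
print(decode_temperature(0x16 | 0x80, 25))  # -22.25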
def _uptime_windows():
"""
Returns uptime in seconds or None, on Windows. Warning: may return
incorrect answers after 49.7 days on versions older than Vista.
"""
if hasattr(ctypes, 'windll') and hasattr(ctypes.windll, 'kernel32'):
lib = ctypes.windll.kernel32
else:
try:
# Windows CE uses the cdecl calling convention.
lib = ctypes.CDLL('coredll.lib')
except (AttributeError, OSError):
return None
if hasattr(lib, 'GetTickCount64'):
# Vista/Server 2008 or later.
lib.GetTickCount64.restype = ctypes.c_uint64
return lib.GetTickCount64() / 1000.
if hasattr(lib, 'GetTickCount'):
# WinCE and Win2k or later; gives wrong answers after 49.7 days.
lib.GetTickCount.restype = ctypes.c_uint32
return lib.GetTickCount() / 1000.
return None | Returns uptime in seconds or None, on Windows. Warning: may return
incorrect answers after 49.7 days on versions older than Vista. |
def validate_arguments(self, start_date, end_date, **kwargs):
"""Validate query arguments."""
if set(kwargs) < set(self.required_filters):
raise InvalidRequestInputError(
'Missing one of the required parameters {0} in '
'query {1}'.format(set(self.required_filters.keys()),
self.query_name)
) | Validate query arguments. |
def decodeMessage(self, data):
"""Decode a protobuf message into a list of Tensor events"""
message = proto_pb2.Msg()
message.ParseFromString(data)
return message | Decode a protobuf message into a list of Tensor events |
def zone_schedules_restore(self, filename):
"""Restore all zones on control system from the given file."""
_LOGGER.info("Restoring schedules to ControlSystem %s (%s)...",
self.systemId, self.location)
_LOGGER.info("Reading from backup file: %s...", filename)
with open(filename, 'r') as file_input:
schedule_db = file_input.read()
schedules = json.loads(schedule_db)
for zone_id, zone_schedule in schedules.items():
name = zone_schedule['name']
zone_info = zone_schedule['schedule']
_LOGGER.info("Restoring schedule for: %s - %s...",
zone_id, name)
if self.hotwater and self.hotwater.zoneId == zone_id:
self.hotwater.set_schedule(json.dumps(zone_info))
else:
self.zones_by_id[zone_id].set_schedule(
json.dumps(zone_info))
_LOGGER.info("Restore completed.") | Restore all zones on control system from the given file. |
def signed_session(self, session=None):
"""Create requests session with any required auth headers applied.
If a session object is provided, configure it directly. Otherwise,
create a new session and return it.
:param session: The session to configure for authentication
:type session: requests.Session
:rtype: requests.Session
"""
# Token cache is handled by the VM extension, call each time to avoid expiration
self.set_token()
return super(MSIAuthentication, self).signed_session(session) | Create requests session with any required auth headers applied.
If a session object is provided, configure it directly. Otherwise,
create a new session and return it.
:param session: The session to configure for authentication
:type session: requests.Session
:rtype: requests.Session |
def UNTL_to_encodedUNTL(subject):
"""Normalize a UNTL subject heading to be used in SOLR."""
subject = normalize_UNTL(subject)
subject = subject.replace(' ', '_')
subject = subject.replace('_-_', '/')
return subject | Normalize a UNTL subject heading to be used in SOLR. |
def _gcs_delete(args, _):
""" Delete one or more buckets or objects. """
objects = _expand_list(args['bucket'])
objects.extend(_expand_list(args['object']))
errs = []
for obj in objects:
try:
bucket, key = google.datalab.storage._bucket.parse_name(obj)
if bucket and key:
gcs_object = google.datalab.storage.Object(bucket, key)
if gcs_object.exists():
google.datalab.storage.Object(bucket, key).delete()
else:
errs.append("%s does not exist" % obj)
elif bucket:
gcs_bucket = google.datalab.storage.Bucket(bucket)
if gcs_bucket.exists():
gcs_bucket.delete()
else:
errs.append("%s does not exist" % obj)
else:
raise Exception("Can't delete object with invalid name %s" % obj)
except Exception as e:
errs.append("Couldn't delete %s: %s" %
(obj, _extract_gcs_api_response_error(str(e))))
if errs:
raise Exception('\n'.join(errs)) | Delete one or more buckets or objects. |
def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value directory on the parent."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
        self._processor.serialize_on_parent(parent, xml_value, state) | Serialize the value directly on the parent.
def validate(self, pkt, messages=None):
"""Returns True if the given Packet is valid, False otherwise.
Validation error messages are appended to an optional messages
array.
"""
valid = True
for f in self.fields:
try:
value = getattr(pkt, f.name)
except AttributeError:
valid = False
if messages is not None:
msg = "Telemetry field mismatch for packet '%s'. "
msg += "Unable to retrieve value for %s in Packet."
values = self.name, f.name
messages.append(msg % values)
break
if f.validate(value, messages) is False:
valid = False
return valid | Returns True if the given Packet is valid, False otherwise.
Validation error messages are appended to an optional messages
array. |
def schaffer(self, x):
""" Schaffer function x0 in [-100..100]"""
N = len(x)
s = x[0:N - 1]**2 + x[1:N]**2
return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1)) | Schaffer function x0 in [-100..100] |
def _latex_circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
plot_barriers=True,
reverse_bits=False,
justify=None):
"""Draw a quantum circuit based on latex (Qcircuit package)
Requires version >=2.6.0 of the qcircuit LaTeX package.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
PIL.Image: an in-memory representation of the circuit diagram
Raises:
OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is
missing.
CalledProcessError: usually points errors during diagram creation.
"""
tmpfilename = 'circuit'
with tempfile.TemporaryDirectory() as tmpdirname:
tmppath = os.path.join(tmpdirname, tmpfilename + '.tex')
_generate_latex_source(circuit, filename=tmppath,
scale=scale, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits, justify=justify)
image = None
try:
subprocess.run(["pdflatex", "-halt-on-error",
"-output-directory={}".format(tmpdirname),
"{}".format(tmpfilename + '.tex')],
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
check=True)
except OSError as ex:
if ex.errno == errno.ENOENT:
logger.warning('WARNING: Unable to compile latex. '
'Is `pdflatex` installed? '
'Skipping latex circuit drawing...')
raise
except subprocess.CalledProcessError as ex:
with open('latex_error.log', 'wb') as error_file:
error_file.write(ex.stdout)
logger.warning('WARNING Unable to compile latex. '
'The output from the pdflatex command can '
'be found in latex_error.log')
raise
else:
try:
base = os.path.join(tmpdirname, tmpfilename)
subprocess.run(["pdftocairo", "-singlefile", "-png", "-q",
base + '.pdf', base])
image = Image.open(base + '.png')
image = utils._trim(image)
os.remove(base + '.png')
if filename:
image.save(filename, 'PNG')
except OSError as ex:
if ex.errno == errno.ENOENT:
logger.warning('WARNING: Unable to convert pdf to image. '
'Is `poppler` installed? '
'Skipping circuit drawing...')
raise
return image | Draw a quantum circuit based on latex (Qcircuit package)
Requires version >=2.6.0 of the qcircuit LaTeX package.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
PIL.Image: an in-memory representation of the circuit diagram
Raises:
OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is
missing.
CalledProcessError: usually points errors during diagram creation. |
def getCandScoresMapBruteForce(self, profile):
"""
        Returns a dictionary that associates the integer representation of each candidate with the
        Bayesian losses that we calculate using brute force.
:ivar Profile profile: A Profile object that represents an election profile.
"""
wmg = profile.getWmg(True)
m = len(wmg.keys())
cands = range(m)
V = self.createBinaryRelation(m)
gains = dict()
for cand in wmg.keys():
gains[cand] = 0
graphs = itertools.product(range(2), repeat=m*(m-1)/2)
for comb in graphs:
prob = 1
i = 0
for a, b in itertools.combinations(cands,2):
V[a][b] = comb[i]
V[b][a] = 1-comb[i]
if comb[i] > 0:
prob *= 1/(1+self.phi ** float(wmg[a+1][b+1]))
else:
prob *= 1/(1+self.phi ** float(wmg[b+1][a+1]))
i += 1
if i >= m*(m-1)/2:
break
for cand in wmg.keys():
gains[cand] += self.utilityFunction.getUtility([cand], V)*prob
        return gains | Returns a dictionary that associates the integer representation of each candidate with the
        Bayesian losses that we calculate using brute force.
:ivar Profile profile: A Profile object that represents an election profile. |
def scan_module(self, modpath, node):
"""Scans a module, collecting all used origins, assuming that modules
are obtained only by dotted paths and no other kinds of expressions."""
used_origins = self.map.setdefault(modpath, set())
def get_origins(modpath, name):
"""Returns the chain of all origins for a given name in a module."""
origins = set()
def walk_origins(modpath, name):
for origin in self.import_map.get_origins(modpath, name):
if origin not in origins:
origins.add(origin)
if '.' in origin:
walk_origins(*origin.rsplit('.', 1))
walk_origins(modpath, name)
return origins
def get_origins_for_node(node):
"""Returns the set of all possible origins to which the given
dotted-path expression might dereference."""
if node_type(node) == 'Name' and node_type(node.ctx) == 'Load':
return {modpath + '.' + node.id} | get_origins(modpath, node.id)
if node_type(node) == 'Attribute' and node_type(node.ctx) == 'Load':
return set.union(set(), *[
{parent + '.' + node.attr} | get_origins(parent, node.attr)
for parent in get_origins_for_node(node.value)])
return set()
def get_origins_used_by_node(node):
"""Returns the set of all possible origins that could be used
during dereferencing of the given dotted-path expression."""
if node_type(node) == 'Name':
return get_origins_for_node(node)
if node_type(node) == 'Attribute':
return set.union(get_origins_used_by_node(node.value),
get_origins_for_node(node))
return set()
def scan_loads(node):
if node_type(node) in ['Name', 'Attribute']:
used_origins.update(get_origins_used_by_node(node))
for_each_child(node, scan_loads)
for_each_child(node, scan_loads)
intermediate_origins = set()
for origin in used_origins:
parts = origin.split('.')
for i in range(1, len(parts)):
intermediate_origins.add('.'.join(parts[:i]))
used_origins.update(intermediate_origins) | Scans a module, collecting all used origins, assuming that modules
are obtained only by dotted paths and no other kinds of expressions. |
def Nu_Kitoh(Re, Pr, H=None, G=None, q=None):
r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
also shown in [2]_, [3]_ and [4]_. Depends on fluid enthalpy, mass flux,
and heat flux.
.. math::
Nu_b = 0.015Re_b^{0.85} Pr_b^m
m = 0.69 - \frac{81000}{q_{dht}} + f_cq
q_{dht} = 200 G^{1.2}
f_c = 2.9\times10^{-8} + \frac{0.11}{q_{dht}} \text{ for }
H_b < 1500 \text{ kJ/kg}
f_c = -8.7\times10^{-8} - \frac{0.65}{q_{dht}} \text{ for }
1500 \text{ kJ/kg} < H_b < 3300 \text{ kJ/kg}
f_c = -9.7\times10^{-7} + \frac{1.3}{q_{dht}} \text{ for }
H_b > 3300 \text{ kJ/kg}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties, [-]
H : float, optional
Enthalpy of water (if the fluid is water), [J/kg]
G : float, optional
Mass flux of the fluid, [kg/m^2/s]
q : float, optional
Heat flux to wall, [W/m^2]
Returns
-------
Nu : float
Nusselt number as explained below, [-]
Notes
-----
The reference point for the enthalpy values is not stated in [1]_. The
upper and lower enthalpy limits for this correlation are 4000 kJ/kg and
0 kJ/kg, but these are not enforced in this function.
If not all of H, G, and q are provided, the correlation is used without
the correction.
This correlation was ranked 6th best in [3]_, and found 4th best for
enhanced heat transfer in [2]_ with a MAD of 12.3%.
For the data used to develop the correlation, G varied from 100-1750
kg/m^2/s, q varied from 0 to 1800 kW/m^2, and bulk temperature varied from
    20 to 550 degrees Celsius.
This correlation does not have realistic behavior for values outside those
used in the study, and should not be used.
Examples
--------
>>> Nu_Kitoh(1E5, 1.2, 1.3E6, 1500, 5E6)
331.80234139591306
References
----------
.. [1] Kitoh, Kazuaki, Seiichi Koshizuka, and Yoshiaki Oka. "Refinement of
Transient Criteria and Safety Analysis for a High-Temperature Reactor
Cooled by Supercritical Water." Nuclear Technology 135, no. 3
(September 1, 2001): 252-64.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
.. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of
Heat Transfer Coefficient Correlation at Supercritical Pressure Using
Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009):
757-66. doi:10.1007/s00231-008-0475-4.
.. [4] Jäger, Wadim, Victor Hugo Sánchez Espinoza, and Antonio Hurtado.
"Review and Proposal for Heat Transfer Predictions at Supercritical
Water Conditions Using Existing Correlations and Experiments." Nuclear
Engineering and Design, (W3MDM) University of Leeds International
Symposium: What Where When? Multi-dimensional Advances for Industrial
Process Monitoring, 241, no. 6 (June 2011): 2184-2203.
doi:10.1016/j.nucengdes.2011.03.022.
'''
if H and G and q:
qht = 200.*G**1.2
if H < 1.5E6:
fc = 2.9E-8 + 0.11/qht
elif 1.5E6 <= H <= 3.3E6:
fc = -8.7E-8 - 0.65/qht
else:
fc = -9.7E-7 + 1.3/qht
m = 0.69 - 81000./qht + fc*q
else:
m = 0.69
return 0.015*Re**0.85*Pr**m | r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
also shown in [2]_, [3]_ and [4]_. Depends on fluid enthalpy, mass flux,
and heat flux.
.. math::
Nu_b = 0.015Re_b^{0.85} Pr_b^m
m = 0.69 - \frac{81000}{q_{dht}} + f_cq
q_{dht} = 200 G^{1.2}
f_c = 2.9\times10^{-8} + \frac{0.11}{q_{dht}} \text{ for }
H_b < 1500 \text{ kJ/kg}
f_c = -8.7\times10^{-8} - \frac{0.65}{q_{dht}} \text{ for }
1500 \text{ kJ/kg} < H_b < 3300 \text{ kJ/kg}
f_c = -9.7\times10^{-7} + \frac{1.3}{q_{dht}} \text{ for }
H_b > 3300 \text{ kJ/kg}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties, [-]
H : float, optional
Enthalpy of water (if the fluid is water), [J/kg]
G : float, optional
Mass flux of the fluid, [kg/m^2/s]
q : float, optional
Heat flux to wall, [W/m^2]
Returns
-------
Nu : float
Nusselt number as explained below, [-]
Notes
-----
The reference point for the enthalpy values is not stated in [1]_. The
upper and lower enthalpy limits for this correlation are 4000 kJ/kg and
0 kJ/kg, but these are not enforced in this function.
If not all of H, G, and q are provided, the correlation is used without
the correction.
This correlation was ranked 6th best in [3]_, and found 4th best for
enhanced heat transfer in [2]_ with a MAD of 12.3%.
For the data used to develop the correlation, G varied from 100-1750
kg/m^2/s, q varied from 0 to 1800 kW/m^2, and bulk temperature varied from
    20 to 550 degrees Celsius.
This correlation does not have realistic behavior for values outside those
used in the study, and should not be used.
Examples
--------
>>> Nu_Kitoh(1E5, 1.2, 1.3E6, 1500, 5E6)
331.80234139591306
References
----------
.. [1] Kitoh, Kazuaki, Seiichi Koshizuka, and Yoshiaki Oka. "Refinement of
Transient Criteria and Safety Analysis for a High-Temperature Reactor
Cooled by Supercritical Water." Nuclear Technology 135, no. 3
(September 1, 2001): 252-64.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
.. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of
Heat Transfer Coefficient Correlation at Supercritical Pressure Using
Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009):
757-66. doi:10.1007/s00231-008-0475-4.
.. [4] Jäger, Wadim, Victor Hugo Sánchez Espinoza, and Antonio Hurtado.
"Review and Proposal for Heat Transfer Predictions at Supercritical
Water Conditions Using Existing Correlations and Experiments." Nuclear
Engineering and Design, (W3MDM) University of Leeds International
Symposium: What Where When? Multi-dimensional Advances for Industrial
Process Monitoring, 241, no. 6 (June 2011): 2184-2203.
doi:10.1016/j.nucengdes.2011.03.022. |
def raises(self, expected_exception):
"""
Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised.
"""
return unittest_case.assertRaises(expected_exception, self._orig_subject, *self._args, **self._kwargs) | Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised. |
def run(cmd, data=None, checks=None, region=None, log_error=True,
log_stdout=False):
"""Run the provided command, logging details and checking for errors.
"""
try:
logger.debug(" ".join(str(x) for x in cmd) if not isinstance(cmd, basestring) else cmd)
_do_run(cmd, checks, log_stdout)
except:
if log_error:
logger.info("error at command")
raise | Run the provided command, logging details and checking for errors. |
def install_cache(expire_after=12 * 3600, cache_post=False):
"""
Patches the requests library with requests_cache.
"""
allowable_methods = ['GET']
if cache_post:
allowable_methods.append('POST')
requests_cache.install_cache(
expire_after=expire_after,
allowable_methods=allowable_methods) | Patches the requests library with requests_cache. |
def unsubscribe(self, connection, destination):
"""
Unsubscribes a connection from the specified topic destination.
@param connection: The client connection to unsubscribe.
@type connection: L{coilmq.server.StompConnection}
@param destination: The topic destination (e.g. '/topic/foo')
@type destination: C{str}
"""
self.log.debug("Unsubscribing %s from %s" % (connection, destination))
if connection in self._topics[destination]:
self._topics[destination].remove(connection)
if not self._topics[destination]:
del self._topics[destination] | Unsubscribes a connection from the specified topic destination.
@param connection: The client connection to unsubscribe.
@type connection: L{coilmq.server.StompConnection}
@param destination: The topic destination (e.g. '/topic/foo')
@type destination: C{str} |
def translate_expression(expression):
"""
    Check if the expression is valid, then turn it into an expression that can be used for filtering.
:return list of lists: One or more matches. Each list has 3 strings.
"""
logger_ts.info("enter translate_expression")
m = re_filter_expr.findall(expression)
matches = []
if m:
for i in m:
logger_ts.info("parse match: {}".format(i))
tmp = list(i[1:])
if tmp[1] in COMPARISONS:
tmp[1] = COMPARISONS[tmp[1]]
tmp[0] = cast_float(tmp[0])
tmp[2] = cast_float(tmp[2])
matches.append(tmp)
else:
logger_ts.warn("translate_expression: invalid expression: {}".format(expression))
print("Invalid input expression")
logger_ts.info("exit translate_expression")
    return matches | Check if the expression is valid, then turn it into an expression that can be used for filtering.
:return list of lists: One or more matches. Each list has 3 strings. |
def date(self):
""":return: datetime object"""
if self.commit_time:
return datetime.utcfromtimestamp(self.commit_time)
else:
return datetime.now() | :return: datetime object |
def And(*predicates, **kwargs):
"""
`And` predicate. Returns ``False`` at the first sub-predicate that returns ``False``.
"""
if kwargs:
predicates += Query(**kwargs),
return _flatten(_And, *predicates) | `And` predicate. Returns ``False`` at the first sub-predicate that returns ``False``. |
def home_shift_summ(self):
"""
:returns: :py:class:`.ShiftSummary` by player for the home team
:rtype: dict ``{ player_num: shift_summary_obj }``
"""
if not self.__wrapped_home:
self.__wrapped_home = self.__wrap(self._home.by_player)
return self.__wrapped_home | :returns: :py:class:`.ShiftSummary` by player for the home team
:rtype: dict ``{ player_num: shift_summary_obj }`` |
def find_files(self, ID=None, fileGrp=None, pageId=None, mimetype=None, url=None, local_only=False):
"""
Search ``mets:file`` in this METS document.
Args:
ID (string) : ID of the file
fileGrp (string) : USE of the fileGrp to list files of
pageId (string) : ID of physical page manifested by matching files
url (string) : @xlink:href of mets:Flocat of mets:file
mimetype (string) : MIMETYPE of matching files
local (boolean) : Whether to restrict results to local files, i.e. file://-URL
Return:
List of files.
"""
ret = []
fileGrp_clause = '' if fileGrp is None else '[@USE="%s"]' % fileGrp
file_clause = ''
if ID is not None:
file_clause += '[@ID="%s"]' % ID
if mimetype is not None:
file_clause += '[@MIMETYPE="%s"]' % mimetype
if url is not None:
file_clause += '[mets:FLocat[@xlink:href = "%s"]]' % url
# TODO lxml says invalid predicate. I disagree
# if local_only:
# file_clause += "[mets:FLocat[starts-with(@xlink:href, 'file://')]]"
# Search
file_ids = self._tree.getroot().xpath("//mets:fileGrp%s/mets:file%s/@ID" % (fileGrp_clause, file_clause), namespaces=NS)
if pageId is not None:
by_pageid = self._tree.getroot().xpath('//mets:div[@TYPE="page"][@ID="%s"]/mets:fptr/@FILEID' % pageId, namespaces=NS)
file_ids = [i for i in by_pageid if i in file_ids]
# instantiate / get from cache
for file_id in file_ids:
el = self._tree.getroot().find('.//mets:file[@ID="%s"]' % file_id, NS)
if file_id not in self._file_by_id:
self._file_by_id[file_id] = OcrdFile(el, mets=self)
# If only local resources should be returned and file is neither a
# file:// URL nor a file path: skip the file
url = self._file_by_id[file_id].url
if local_only and not (url.startswith('file://') or '://' not in url):
continue
ret.append(self._file_by_id[file_id])
return ret | Search ``mets:file`` in this METS document.
Args:
ID (string) : ID of the file
fileGrp (string) : USE of the fileGrp to list files of
pageId (string) : ID of physical page manifested by matching files
url (string) : @xlink:href of mets:Flocat of mets:file
mimetype (string) : MIMETYPE of matching files
local (boolean) : Whether to restrict results to local files, i.e. file://-URL
Return:
List of files. |
def vstackm(matrices):
"""Generalizes `numpy.vstack` to :class:`Matrix` objects."""
arr = np_vstack(tuple(m.matrix for m in matrices))
# print(tuple(m.matrix.dtype for m in matrices))
# print(arr.dtype)
return Matrix(arr) | Generalizes `numpy.vstack` to :class:`Matrix` objects. |
def noinfo(self, msg, oname):
"""Generic message when no information is found."""
print 'No %s found' % msg,
if oname:
print 'for %s' % oname
else:
print | Generic message when no information is found. |