text (string, 78 to 104k characters) | score (float64, 0 to 0.18)
---|---
def _query_zendesk(self, endpoint, object_type, *endpoint_args, **endpoint_kwargs):
"""
Query Zendesk for items. If an id or a list of ids is passed, attempt to locate these items
in the relevant cache. If they cannot be found, or no ids are passed, execute a call to Zendesk
to retrieve the items.
:param endpoint: target endpoint.
:param object_type: object type we are expecting.
:param endpoint_args: args for endpoint
:param endpoint_kwargs: kwargs for endpoint
:return: either a ResultGenerator or a Zenpy object.
"""
_id = endpoint_kwargs.get('id', None)
if _id:
item = self.cache.get(object_type, _id)
if item:
return item
else:
return self._get(url=self._build_url(endpoint(*endpoint_args, **endpoint_kwargs)))
elif 'ids' in endpoint_kwargs:
cached_objects = []
# Check to see if we have all objects in the cache.
# If we are missing even one we request them all again.
# This could be optimized to only request the missing objects.
for _id in endpoint_kwargs['ids']:
obj = self.cache.get(object_type, _id)
if not obj:
return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
cached_objects.append(obj)
return ZendeskResultGenerator(self, {}, response_objects=cached_objects, object_type=object_type)
else:
return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs))) | 0.005389 |
def matrix(self):
"""The 4x4 matrix representation of this rotation"""
result = np.identity(4, float)
result[0:3, 0:3] = self.r
return result | 0.011561 |
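A minimal standalone sketch of what this property computes, assuming `self.r` holds a 3x3 rotation matrix (here a hypothetical 90° turn about the z-axis embedded into a 4x4 homogeneous transform):

```python
import numpy as np

# Hypothetical 3x3 rotation standing in for self.r: a 90-degree turn about z.
r = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])

result = np.identity(4, float)   # same construction as the property above
result[0:3, 0:3] = r             # rotation block; translation stays zero
print(result[:3, :3] @ [1.0, 0.0, 0.0])  # x-axis maps to the y-axis: [0. 1. 0.]
```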
def strip_headers(text):
"""Remove lines that are part of the Project Gutenberg header or footer.
Note: this function is a port of the C++ utility by Johannes Krugel. The
original version of the code can be found at:
http://www14.in.tum.de/spp1307/src/strip_headers.cpp
Args:
text (unicode): The body of the text to clean up.
Returns:
unicode: The text with any non-text content removed.
"""
lines = text.splitlines()
sep = str(os.linesep)
out = []
i = 0
footer_found = False
ignore_section = False
for line in lines:
reset = False
if i <= 600:
# Check if the header ends here
if any(line.startswith(token) for token in TEXT_START_MARKERS):
reset = True
# If it's the end of the header, delete the output produced so far.
# May be done several times, if multiple lines occur indicating the
# end of the header
if reset:
out = []
continue
if i >= 100:
# Check if the footer begins here
if any(line.startswith(token) for token in TEXT_END_MARKERS):
footer_found = True
# If it's the beginning of the footer, stop output
if footer_found:
break
if any(line.startswith(token) for token in LEGALESE_START_MARKERS):
ignore_section = True
continue
elif any(line.startswith(token) for token in LEGALESE_END_MARKERS):
ignore_section = False
continue
if not ignore_section:
out.append(line.rstrip(sep))
i += 1
return sep.join(out) | 0.000579 |
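A hedged usage sketch: the marker sets below are tiny stand-ins for the much longer lists shipped with the Gutenberg package, chosen only so `strip_headers` can be exercised in isolation (note the footer check only kicks in after 100 kept lines):

```python
import os

TEXT_START_MARKERS = frozenset(("*** START OF THIS PROJECT GUTENBERG",))
TEXT_END_MARKERS = frozenset(("*** END OF THIS PROJECT GUTENBERG",))
LEGALESE_START_MARKERS = frozenset(("<<THIS ELECTRONIC VERSION",))
LEGALESE_END_MARKERS = frozenset(("SERVICE THAT CHARGES",))

body = ["Body line %d" % n for n in range(120)]
raw = os.linesep.join(
    ["Junk before the header marker",
     "*** START OF THIS PROJECT GUTENBERG EBOOK EXAMPLE ***"]
    + body
    + ["*** END OF THIS PROJECT GUTENBERG EBOOK EXAMPLE ***",
       "License boilerplate after the footer marker"])

cleaned = strip_headers(raw)
print(cleaned.splitlines()[0])   # Body line 0
print(cleaned.splitlines()[-1])  # Body line 119
```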
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'cms/login.html',
}
return login(request, **defaults) | 0.003053 |
def parse_mcast_grps(family, grp_attr):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.
Positional arguments:
family -- genl_family class instance.
grp_attr -- nlattr class instance.
Returns:
0 on success or a negative error code.
"""
remaining = c_int()
if not grp_attr:
raise BUG
for nla in nla_for_each_nested(grp_attr, remaining):
tb = dict()
err = nla_parse_nested(tb, CTRL_ATTR_MCAST_GRP_MAX, nla, family_grp_policy)
if err < 0:
return err
if not tb[CTRL_ATTR_MCAST_GRP_ID] or not tb[CTRL_ATTR_MCAST_GRP_NAME]:
return -NLE_MISSING_ATTR
id_ = nla_get_u32(tb[CTRL_ATTR_MCAST_GRP_ID])
name = nla_get_string(tb[CTRL_ATTR_MCAST_GRP_NAME])
err = genl_family_add_grp(family, id_, name)
if err < 0:
return err
return 0 | 0.002222 |
def write_record(self, event_str):
"""Writes a serialized event to file."""
header = struct.pack('Q', len(event_str))
header += struct.pack('I', masked_crc32c(header))
footer = struct.pack('I', masked_crc32c(event_str))
self._writer.write(header + event_str + footer) | 0.006515 |
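For context, a self-contained sketch of the record framing used above. The real `masked_crc32c` is based on CRC32-C; `zlib.crc32` is substituted here only so the sketch runs without extra dependencies, and the masking constant is an assumption taken from the commonly used record format:

```python
import struct
import zlib

def masked_crc32c(data):
    # Stand-in: real record files use CRC32-C, not zlib's CRC32.
    crc = zlib.crc32(data) & 0xFFFFFFFF
    return (((crc >> 15) | (crc << 17)) + 0xA282EAD8) & 0xFFFFFFFF

def frame_record(event_str):
    # Mirrors write_record: length header, header CRC, payload, payload CRC.
    header = struct.pack('Q', len(event_str))
    header += struct.pack('I', masked_crc32c(header))
    footer = struct.pack('I', masked_crc32c(event_str))
    return header + event_str + footer

print(len(frame_record(b'hello event')))  # 8 + 4 + 11 + 4 = 27 bytes
```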
def _initialize_trackbars(self):
"""
Initialize trackbars by discovering ``block_matcher``'s parameters.
"""
for parameter in self.block_matcher.parameter_maxima.keys():
maximum = self.block_matcher.parameter_maxima[parameter]
if not maximum:
maximum = self.shortest_dimension
cv2.createTrackbar(parameter, self.window_name,
self.block_matcher.__getattribute__(parameter),
maximum,
partial(self._set_value, parameter)) | 0.003361 |
def magic_read(infile, data=None, return_keys=False, verbose=False):
"""
Reads a MagIC-format tab-delimited file, returns data in a list of dictionaries.
Parameters
----------
Required:
infile : the MagIC formatted tab delimited data file
first line contains 'tab' in the first column and the data file type in the second (e.g., measurements, specimen, sample, etc.)
Optional:
data : data read in with, e.g., file.readlines()
return_keys : if True, also return the list of column names
verbose : if True, print out warnings while reading
Returns
-------
list of dictionaries, file type
(plus the list of column names if return_keys is True)
"""
if infile:
if not os.path.exists(infile):
if return_keys:
return [], 'empty_file', []
return [], 'empty_file'
hold, magic_data, magic_record, magic_keys = [], [], {}, []
if data:
lines = list(data)
elif (not data) and (not infile):
if return_keys:
return [], 'empty_file', []
return [], 'empty_file'
else:
# if the file doesn't exist, end here
if not os.path.exists(infile):
if return_keys:
return [], 'bad_file', []
return [], 'bad_file'
# use custom pmagpy open_file
lines = open_file(infile, verbose=verbose)
if not lines:
if return_keys:
return [], 'bad_file', []
return [], 'bad_file'
d_line = lines[0][:-1].strip('\n').strip('\r').strip('\t')
if not d_line:
if return_keys:
return [], 'empty_file', []
return [], 'empty_file'
if d_line[0] == "s" or d_line[1] == "s":
delim = 'space'
elif d_line[0] == "t" or d_line[1] == "t":
delim = 'tab'
else:
print('-W- error reading {}. Check that this is a MagIC-format file'.format(infile))
if return_keys:
return [], 'bad_file', []
return [], 'bad_file'
if delim == 'space':
file_type = d_line.split()[1]
if delim == 'tab':
file_type = d_line.split('\t')[1]
if file_type == 'delimited':
if delim == 'space':
file_type = d_line.split()[2]
if delim == 'tab':
file_type = d_line.split('\t')[2]
line = lines[1].strip('\n').strip('\r')
if delim == 'space':
line = line.split() # lines[1][:-1].split()
if delim == 'tab':
line = line.split('\t') # lines[1][:-1].split('\t')
for key in line:
magic_keys.append(key)
lines = lines[2:]
if len(lines) < 1:
if return_keys:
return [], 'empty_file', []
return [], 'empty_file'
for line in lines[:-1]:
line.replace('\n', '')
if delim == 'space':
rec = line[:-1].split()
if delim == 'tab':
rec = line[:-1].split('\t')
hold.append(rec)
line = lines[-1].replace('\n', '').replace('\r', '')
if delim == 'space':
rec = line[:-1].split()
if delim == 'tab':
rec = line.split('\t')
hold.append(rec)
for rec in hold:
magic_record = {}
if len(magic_keys) > len(rec):
# pad rec with empty strings if needed
for i in range(len(magic_keys) - len(rec)):
rec.append('')
if len(magic_keys) != len(rec):
# ignores this warning when reading the dividers in an upload.txt
# composite file
if rec != ['>>>>>>>>>>'] and 'delimited' not in rec[0]:
print("Warning: Uneven record lengths detected in {}: ".format(infile))
print('keys:', magic_keys)
print('record:', rec)
# modified by Ron Shaar:
# add a health check:
# if len(magic_keys) > len(rec): take rec
# if len(magic_keys) < len(rec): take magic_keys
# original code: for k in range(len(rec)):
# changed to: for k in range(min(len(magic_keys),len(rec))):
for k in range(min(len(magic_keys), len(rec))):
magic_record[magic_keys[k]] = rec[k].strip('\n').strip('\r')
magic_data.append(magic_record)
magictype = file_type.lower().split("_")
Types = ['er', 'magic', 'pmag', 'rmag']
if magictype in Types:
file_type = file_type.lower()
if return_keys:
return magic_data, file_type, magic_keys
return magic_data, file_type | 0.00093 |
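A hedged usage sketch: passing pre-read lines through `data=` sidesteps the pmagpy `open_file` helper, so the parser above can be exercised on a tiny tab-delimited table:

```python
lines = [
    "tab\tsites\n",
    "site\tlat\tlon\n",
    "site1\t45.0\t13.0\n",
    "site2\t-12.5\t130.2\n",
]
records, file_type = magic_read(None, data=lines)
print(file_type)           # sites
print(records[0]['site'])  # site1
print(records[1]['lon'])   # 130.2
```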
def _import_serializer_class(self, location):
"""
Resolves a dot-notation string to serializer class.
<app>.<SerializerName> will automatically be interpreted as:
<app>.serializers.<SerializerName>
"""
pieces = location.split(".")
class_name = pieces.pop()
if pieces[-1] != "serializers":
pieces.append("serializers")
module = importlib.import_module(".".join(pieces))
return getattr(module, class_name) | 0.003922 |
def get_stats_participation(self):
"""
:calls: `GET /repos/:owner/:repo/stats/participation <http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else>`_
:rtype: None or :class:`github.StatsParticipation.StatsParticipation`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/participation"
)
if not data:
return None
else:
return github.StatsParticipation.StatsParticipation(self._requester, headers, data, completed=True) | 0.006431 |
def create_mutation_inputs(service):
"""
Args:
service : The service being created by the mutation
Returns:
(list) : a list of all of the fields available for the service,
with the required ones respected.
"""
# grab the default list of field summaries
inputs = _service_mutation_summaries(service)
# make sure the pk isn't in the list
inputs.remove([field for field in inputs if field['name'] == 'id'][0])
# return the final list
return inputs | 0.001876 |
def _handle_break(self, node, scope, ctxt, stream):
"""Handle break node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling break")
raise errors.InterpBreak() | 0.007353 |
def subdivide_to_size(vertices,
faces,
max_edge,
max_iter=10):
"""
Subdivide a mesh until every edge is shorter than a
specified length.
Will return a triangle soup, not a nicely structured mesh.
Parameters
------------
vertices : (n, 3) float
Vertices in space
faces : (m, 3) int
Indices of vertices which make up triangles
max_edge : float
Maximum length of any edge in the result
max_iter : int
The maximum number of times to run subdivision
Returns
------------
vertices : (j, 3) float
Vertices in space
faces : (q, 3) int
Indices of vertices
"""
# store completed
done_face = []
done_vert = []
# copy inputs and make sure dtype is correct
current_faces = np.array(faces,
dtype=np.int64,
copy=True)
current_vertices = np.array(vertices,
dtype=np.float64,
copy=True)
# loop through iteration cap
for i in range(max_iter + 1):
# (n, 3, 3) float triangle soup
triangles = current_vertices[current_faces]
# compute the length of every triangle edge
edge_lengths = (np.diff(triangles[:, [0, 1, 2, 0]],
axis=1) ** 2).sum(axis=2) ** .5
too_long = (edge_lengths > max_edge).any(axis=1)
# clean up the faces a little bit so we don't
# store a ton of unused vertices
unique, inverse = np.unique(
current_faces[np.logical_not(too_long)],
return_inverse=True)
# store vertices and faces meeting criteria
done_vert.append(current_vertices[unique])
done_face.append(inverse.reshape((-1, 3)))
# met our goals so abort
if not too_long.any():
break
# run subdivision again
(current_vertices,
current_faces) = subdivide(current_vertices,
current_faces[too_long])
# stack sequence into nice (n, 3) arrays
vertices, faces = util.append_faces(done_vert,
done_face)
return vertices, faces | 0.000438 |
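A standalone check of the edge-length test at the heart of the loop above (the `subdivide` and `util.append_faces` helpers belong to the surrounding library and are not reproduced here): one unit right triangle with `max_edge` chosen between its leg and hypotenuse lengths.

```python
import numpy as np

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])
max_edge = 1.2

triangles = vertices[faces]                      # (1, 3, 3) triangle soup
edge_lengths = (np.diff(triangles[:, [0, 1, 2, 0]],
                        axis=1) ** 2).sum(axis=2) ** .5
too_long = (edge_lengths > max_edge).any(axis=1)

print(edge_lengths.round(3))  # [[1.    1.414 1.   ]]
print(too_long)               # [ True]  -> this face would be subdivided
```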
def kibana_config(self):
"""
config kibana
:return:
"""
uncomment("/etc/kibana/kibana.yml", "#server.host:", use_sudo=True)
sed('/etc/kibana/kibana.yml', 'server.host:.*',
'server.host: "{0}"'.format(env.host_string), use_sudo=True)
sudo('systemctl stop kibana.service')
sudo('systemctl daemon-reload')
sudo('systemctl enable kibana.service')
sudo('systemctl start kibana.service') | 0.004219 |
def frost_days(tasmin, freq='YS'):
r"""Frost days index
Number of days where daily minimum temperatures are below 0℃.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Frost days index.
Notes
-----
Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TN_{ij} < 0℃
"""
tu = units.parse_units(tasmin.attrs['units'].replace('-', '**-'))
fu = 'degC'
frz = 0
if fu != tu:
frz = units.convert(frz, fu, tu)
f = (tasmin < frz) * 1
return f.resample(time=freq).sum(dim='time') | 0.002528 |
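A hedged illustration of the same count with plain xarray, skipping the unit-handling helper the function relies on by generating the minima directly in °C:

```python
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range("2000-01-01", "2001-12-31", freq="D")
tasmin = xr.DataArray(
    10.0 * np.sin(2 * np.pi * np.arange(time.size) / 365.25) - 2.0,
    coords={"time": time}, dims="time", attrs={"units": "degC"})

f = (tasmin < 0) * 1                  # 1 on frost days, 0 otherwise
per_year = f.resample(time="YS").sum(dim="time")
print(per_year.values)                # frost-day count per calendar year
```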
def getScreen(self,screen_data=None):
"""This function fills screen_data with the RAW Pixel data
screen_data MUST be a numpy array of uint8/int8. This could be initialized like so:
screen_data = np.zeros(w*h, dtype=np.uint8)
Notice, it must be width*height in size also
If it is None, then this function will initialize it
Note: This is the raw pixel values from the atari, before any RGB palette transformation takes place
"""
if(screen_data is None):
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
screen_data = np.zeros(width*height,dtype=np.uint8)
ale_lib.getScreen(self.obj,as_ctypes(screen_data))
return screen_data | 0.009067 |
def _create_placeholder_objects(self):
""" PDF objects #1 through #3 are typically saved for the
Zeroth, Catalog, and Pages Objects. This program will save
the numbers, but outputs the individual Page and Content objects
first. The actual Catalog and Pages objects are calculated after.
"""
self.objects.append("Zeroth")
self.objects.append("Catalog")
self.objects.append("Pages") | 0.004292 |
def _find_passwords(self, service, username, deleting=False):
"""Get password of the username for the service
"""
passwords = []
service = self._safe_string(service)
username = self._safe_string(username)
for attrs_tuple in (('username', 'service'), ('user', 'domain')):
attrs = GnomeKeyring.Attribute.list_new()
GnomeKeyring.Attribute.list_append_string(
attrs, attrs_tuple[0], username)
GnomeKeyring.Attribute.list_append_string(
attrs, attrs_tuple[1], service)
result, items = GnomeKeyring.find_items_sync(
GnomeKeyring.ItemType.NETWORK_PASSWORD, attrs)
if result == GnomeKeyring.Result.OK:
passwords += items
elif deleting:
if result == GnomeKeyring.Result.CANCELLED:
raise PasswordDeleteError("Cancelled by user")
elif result != GnomeKeyring.Result.NO_MATCH:
raise PasswordDeleteError(result.value_name)
return passwords | 0.001835 |
def predict_distributed(self, data_rdd, batch_size = -1):
"""
Model inference based on the given data.
You need to invoke collect() to trigger those action \
as the returning result is an RDD.
:param data_rdd: the data to be predicted.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictRDD", self.value, data_rdd, batch_size)
return result.map(lambda data: data.to_ndarray()) | 0.008503 |
def n_exec_stmt(self, node):
"""
exec_stmt ::= EXEC expr
exec_stmt ::= EXEC expr IN test
exec_stmt ::= EXEC expr IN test COMMA test
"""
self.write(self.indent, 'exec ')
self.preorder(node[1])
if len(node) > 2:
self.write(self.indent, ' in ')
self.preorder(node[3])
if len(node) > 5:
self.write(self.indent, ', ')
self.preorder(node[5])
self.println()
self.prune() | 0.003906 |
def keelhaul(rest):
"Inflict great pain and embarassment on some(one|thing)"
keelee = rest
karma.Karma.store.change(keelee, -1)
return (
"/me straps %s to a dirty rope, tosses 'em overboard and pulls "
"with great speed. Yarrr!" % keelee) | 0.028455 |
def to_pivot_table(self, fieldnames=(), verbose=True,
values=None, rows=None, cols=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, coerce_float=True):
"""
A convenience method for creating a spread sheet style pivot table
as a DataFrame
Parameters
----------
fieldnames: The model field names(columns) to utilise in creating
the DataFrame. You can span relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
values: The field to use to calculate the values to aggregate.
rows: The list of field names to group on
Keys to group on the x-axis of the pivot table
cols: The list of column names or arrays to group on
Keys to group on the y-axis of the pivot table
aggfunc: How to aggregate the values. By default this would be
``numpy.mean``. A list of aggregates functions can be passed
In this case the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
fill_value: A scalar value to replace the missing values with
margins: Boolean, default False Add all row / columns
(e.g. for subtotal / grand totals)
dropna: Boolean, default True.
Do not include columns whose entries are all NaN
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions for foreign key fields else use the
actual values set in the model
coerce_float: Attempt to convert values to non-string, non-numeric
objects (like decimal.Decimal) to floating point.
"""
df = self.to_dataframe(fieldnames, verbose=verbose,
coerce_float=coerce_float)
return df.pivot_table(values=values, fill_value=fill_value, index=rows,
columns=cols, aggfunc=aggfunc, margins=margins,
dropna=dropna) | 0.002099 |
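Since the method simply forwards to pandas after building the DataFrame, a hedged illustration with a hand-made frame shows what the arguments map to:

```python
import pandas as pd

df = pd.DataFrame({
    "region": ["north", "north", "south", "south"],
    "product": ["a", "b", "a", "b"],
    "sales": [10.0, 20.0, 30.0, 40.0],
})
table = df.pivot_table(values="sales", index="region", columns="product",
                       aggfunc="mean", fill_value=0, margins=False,
                       dropna=True)
print(table)  # mean sales: north a=10, b=20; south a=30, b=40
```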
def get_netG():
"""Get net G"""
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*4) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*2) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
return netG | 0.000953 |
def axis_transform(pca_axes):
"""
Creates an affine transformation matrix to
rotate data in PCA axes into Cartesian plane
"""
from_ = N.identity(3)
to_ = pca_axes
# Find inverse transform for forward transform
# y = M x -> M = y (x)^(-1)
# We don't need to do least-squares since
# there is a simple transformation
trans_matrix = N.linalg.lstsq(from_,to_)[0]
return trans_matrix | 0.004684 |
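A quick check, assuming the module-level `import numpy as N` the function expects: because `from_` is the identity, the least-squares solve simply returns the PCA axes themselves.

```python
import numpy as N

pca_axes = N.array([[0.0, 1.0, 0.0],
                    [-1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0]])
print(N.allclose(axis_transform(pca_axes), pca_axes))  # True
```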
def get_user(self, auth, username):
"""
Returns a :class:`GogsUser` representing the user with username ``username``.
:param auth.Authentication auth: authentication object, can be ``None``
:param str username: username of user to get
:return: the retrieved user
:rtype: GogsUser
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/users/{}".format(username)
response = self.get(path, auth=auth)
return GogsUser.from_json(response.json()) | 0.004942 |
def repost(self, token):
"""
Repost the job if it has timed out
(:py:data:`cloudsight.STATUS_TIMEOUT`).
:param token: Job token as returned from
:py:meth:`cloudsight.API.image_request` or
:py:meth:`cloudsight.API.remote_image_request`
"""
url = '%s/%s/repost' % (REQUESTS_URL, token)
response = requests.post(url, headers={
'Authorization': self.auth.authorize('POST', url),
'User-Agent': USER_AGENT,
})
if response.status_code == 200:
return
return self._unwrap_error(response) | 0.003101 |
def _set_interface_hello_padding(self, v, load=False):
"""
Setter method for interface_hello_padding, mapped from YANG variable /routing_system/interface/ve/intf_isis/interface_isis/interface_hello/interface_hello_padding (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_hello_padding is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_hello_padding() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_hello_padding.interface_hello_padding, is_container='container', presence=False, yang_name="interface-hello-padding", rest_name="padding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Pad hello packets on this interface', u'alt-name': u'padding'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_hello_padding must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_hello_padding.interface_hello_padding, is_container='container', presence=False, yang_name="interface-hello-padding", rest_name="padding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Pad hello packets on this interface', u'alt-name': u'padding'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__interface_hello_padding = t
if hasattr(self, '_set'):
self._set() | 0.005793 |
def add_state(self, state, storage_load=False):
"""Adds a state to the container state.
:param state: the state that is going to be added
:param storage_load: True if the state was directly loaded from filesystem
:return: the state_id of the new state
:raises exceptions.AttributeError: if state.state_id already exist
"""
assert isinstance(state, State)
# logger.info("add state {}".format(state))
# handle the case that the child state id is the same as the container state id or future sibling state id
while state.state_id == self.state_id or state.state_id in self.states:
state.change_state_id()
# TODO: add validity checks for states and then remove this check => to discuss
if state.state_id in self._states.keys():
raise AttributeError("State id %s already exists in the container state", state.state_id)
else:
state.parent = self
self._states[state.state_id] = state
return state.state_id | 0.00565 |
def tradesWS(symbols=None, on_data=None):
'''https://iextrading.com/developer/docs/#trades'''
symbols = _strToList(symbols)
sendinit = ({'symbols': symbols, 'channels': ['trades']},)
return _stream(_wsURL('deep'), sendinit, on_data) | 0.004032 |
def _bottom_position(self, resource):
"""
Place watermark to bottom position
:param resource: Image.Image
:return: Image.Image
"""
image = self._get_scaled_image(resource)
left = int(round(resource.size[0] // 2 - image.size[0] // 2))
upper = int(round(resource.size[1] - image.size[1]))
return image, left, upper | 0.005181 |
def distinguish(self, id_, how=True):
"""Login required. Sends POST to distinguish a submission or comment. Returns :class:`things.Link` or :class:`things.Comment`, or raises :class:`exceptions.UnexpectedResponse` otherwise.
URL: ``http://www.reddit.com/api/distinguish/``
:param id\_: full id of object to distinguish
:param how: either True, False, or 'admin'
"""
if how == True:
h = 'yes'
elif how == False:
h = 'no'
elif how == 'admin':
h = 'admin'
else:
raise ValueError("how must be either True, False, or 'admin'")
data = dict(id=id_)
j = self.post('api', 'distinguish', h, data=data)
try:
return self._thingify(j['json']['data']['things'][0])
except Exception:
raise UnexpectedResponse(j) | 0.010078 |
async def _get_messenger_profile(self, page, fields: List[Text]):
"""
Fetch the value of specified fields in order to avoid setting the same
field twice at the same value (since Facebook engineers are not able
to make menus that keep on working if set again).
"""
params = {
'access_token': page['page_token'],
'fields': ','.join(fields),
}
get = self.session.get(PROFILE_ENDPOINT, params=params)
async with get as r:
await self._handle_fb_response(r)
out = {}
for data in (await r.json())['data']:
out.update(data)
return out | 0.002894 |
def provider_parser(subparser):
"""Configure provider parser for CloudNS"""
identity_group = subparser.add_mutually_exclusive_group()
identity_group.add_argument(
"--auth-id", help="specify user id for authentication")
identity_group.add_argument(
"--auth-subid", help="specify subuser id for authentication")
identity_group.add_argument(
"--auth-subuser", help="specify subuser name for authentication")
subparser.add_argument(
"--auth-password", help="specify password for authentication")
subparser.add_argument("--weight", help="specify the SRV record weight")
subparser.add_argument("--port", help="specify the SRV record port") | 0.001435 |
def create_template(material, path, show=False):
"""
Create a template csv file for a data set.
:param material: the name of the material
:param path: the path of the directory where the file must be written
:param show: a boolean indicating whether the created file should be \
displayed after creation
"""
file_name = 'dataset-%s.csv' % material.lower()
file_path = os.path.join(path, file_name)
with open(file_path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['Name', material])
writer.writerow(['Description', '<Add a data set description '
'here.>'])
writer.writerow(['Reference', '<Add a reference to the source of '
'the data set here.>'])
writer.writerow(['Temperature', '<parameter 1 name>',
'<parameter 2 name>', '<parameter 3 name>'])
writer.writerow(['T', '<parameter 1 display symbol>',
'<parameter 2 display symbol>',
'<parameter 3 display symbol>'])
writer.writerow(['K', '<parameter 1 units>',
'<parameter 2 units>', '<parameter 3 units>'])
writer.writerow(['T', '<parameter 1 symbol>',
'<parameter 2 symbol>', '<parameter 3 symbol>'])
for i in range(10):
writer.writerow([100.0 + i*50, float(i), 10.0 + i, 100.0 + i])
if show is True:
webbrowser.open_new(file_path) | 0.001137 |
def dense_message_pass(node_states, edge_matrices):
"""Computes a_t from h_{t-1}, see bottom of page 3 in the paper.
Args:
node_states: [B, L, D] tensor (h_{t-1})
edge_matrices (tf.float32): [B, L*D, L*D]
Returns:
messages (tf.float32): [B, L, D] For each pair
of nodes in the graph a message is sent along both the incoming and
outgoing edge.
"""
batch_size, num_nodes, node_dim = common_layers.shape_list(node_states)
# Stack the nodes as a big column vector.
h_flat = tf.reshape(
node_states, [batch_size, num_nodes * node_dim, 1], name="h_flat")
messages = tf.reshape(
tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim],
name="messages_matmul")
message_bias = tf.get_variable("message_bias", shape=node_dim)
messages = messages + message_bias
messages = tf.reshape(messages, [batch_size, num_nodes, node_dim])
return messages | 0.010893 |
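A NumPy sketch of the same stack-and-multiply step with hypothetical shapes (the learned bias is omitted): each node's incoming message is a linear combination of all node states through the dense block matrix of edge transforms.

```python
import numpy as np

batch_size, num_nodes, node_dim = 2, 3, 4
node_states = np.random.randn(batch_size, num_nodes, node_dim)
edge_matrices = np.random.randn(batch_size, num_nodes * node_dim,
                                num_nodes * node_dim)

h_flat = node_states.reshape(batch_size, num_nodes * node_dim, 1)
messages = np.matmul(edge_matrices, h_flat).reshape(
    batch_size, num_nodes, node_dim)
print(messages.shape)  # (2, 3, 4) -- one aggregated message per node
```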
def call(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
on_error='ignore', returncode=False):
"""Run the given command (with shell=False) and return the output as a
string.
Strips the output of enclosing whitespace.
If the return code is non-zero, throw GitInvocationError.
"""
# start external command process
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get outputs
out, _ = p.communicate()
# throw exception if process failed
if p.returncode != 0 and on_error == 'raise':
raise GitInvocationError('Failed to run "%s"' % " ".join(command))
if returncode:
return out.strip(), p.returncode
else:
return out.strip() | 0.001292 |
def send_exit_status(self, status):
"""
Send the exit status of an executed command to the client. (This
really only makes sense in server mode.) Many clients expect to
get some sort of status code back from an executed command after
it completes.
@param status: the exit code of the process
@type status: int
@since: 1.2
"""
# in many cases, the channel will not still be open here.
# that's fine.
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('exit-status')
m.add_boolean(False)
m.add_int(status)
self.transport._send_user_message(m) | 0.005355 |
def get_member_by_uuid(self, member_uuid):
"""
Returns the member with specified member uuid.
:param member_uuid: (int), uuid of the desired member.
:return: (:class:`~hazelcast.core.Member`), the corresponding member.
"""
for member in self.get_member_list():
if member.uuid == member_uuid:
return member | 0.005236 |
def _normalize_cmd_args(cmd):
"""Normalize subprocess arguments to handle list commands, string and pipes.
Piped commands set pipefail and require use of bash to help with debugging
intermediate errors.
"""
if isinstance(cmd, six.string_types):
# check for standard or anonymous named pipes
if cmd.find(" | ") > 0 or cmd.find(">(") or cmd.find("<("):
return "set -o pipefail; " + cmd, True, find_bash()
else:
return cmd, True, None
else:
return [str(x) for x in cmd], False, None | 0.003578 |
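Assuming the corrected pipe detection above and that `six` is importable, the two branches behave as follows (the piped case additionally prepends `set -o pipefail;` and resolves a bash executable):

```python
import six  # needed by the isinstance check at call time

print(_normalize_cmd_args("echo hello"))  # ('echo hello', True, None)
print(_normalize_cmd_args(["ls", "-l"]))  # (['ls', '-l'], False, None)
```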
def create_model(self, model):
"""Ran when a new model is created."""
super().create_model(model)
for mixin in self.post_processing_mixins:
mixin.create_model(model) | 0.009852 |
def _check_data_flow_id(self, data_flow):
"""Checks the validity of a data flow id
Checks whether the id of the given data flow is already used by another data flow within the state.
:param rafcon.core.data_flow.DataFlow data_flow: The data flow to be checked
:return bool validity, str message: validity is True when the data flow is valid, False otherwise. message gives
more information, especially if the data flow is not valid
"""
data_flow_id = data_flow.data_flow_id
if data_flow_id in self.data_flows and data_flow is not self.data_flows[data_flow_id]:
return False, "data_flow_id already existing"
return True, "valid" | 0.008439 |
def _satisfied(self, cl, model):
"""
Given a clause (as an iterable of integers) and an assignment (as a
list of integers), this method checks whether or not the assignment
satisfies the clause. This is done by a simple clause traversal.
The method is invoked from :func:`_filter_satisfied`.
:param cl: a clause to check
:param model: an assignment
:type cl: iterable(int)
:type model: list(int)
:rtype: bool
"""
for l in cl:
if len(model) < abs(l) or model[abs(l) - 1] == l:
# either literal is unassigned or satisfied by the model
return True
return False | 0.004021 |
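A small worked example for the clause check above; the method never touches `self`, so `None` can stand in for it here.

```python
model = [1, -2, 3]
print(_satisfied(None, [1, 2], model))   # True:  literal 1 is satisfied
print(_satisfied(None, [2, -3], model))  # False: 2 and -3 are both falsified
```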
def visible_to_user(self, element, *ignorable):
"""
Determines whether an element is visible to the user. A list of
ignorable elements can be passed to this function. These would
typically be things like invisible layers sitting atop other
elements. This function ignores these elements by setting
their CSS ``display`` parameter to ``none`` before checking,
and restoring them to their initial value after checking. The
list of ignorable elements should not contain elements that
would disturb the position of the element to check if their
``display`` parameter is set to ``none``. Otherwise, the
algorithm is likely to fail.
:param element: The element to check.
:type element: :class:`selenium.webdriver.remote.webelement.WebElement`
:param ignorable: The elements that can be ignored.
:type ignorable: :class:`list` of strings that are CSS selectors.
"""
if not element.is_displayed():
return False
return self.driver.execute_script("""
var el = arguments[0];
var ignorable = arguments[1];
var old_displays = ignorable.map(function (x) {
var old = x.style.display;
x.style.display = "none";
return old;
});
try {
var rect = el.getBoundingClientRect();
// Sigh... we need to round the numbers to avoid running into
// fractional pixels causing the following test to fail.
rect = {
left: Math.ceil(rect.left),
right: Math.floor(rect.right),
top: Math.ceil(rect.top),
bottom: Math.floor(rect.bottom)
}
var ret = false;
var efp = document.elementFromPoint.bind(document);
var at_corner;
ret = ((at_corner = efp(rect.left, rect.top)) === el) ||
el.contains(at_corner) ||
((at_corner = efp(rect.left, rect.bottom)) === el) ||
el.contains(at_corner) ||
((at_corner = efp(rect.right, rect.top)) === el) ||
el.contains(at_corner) ||
((at_corner = efp(rect.right, rect.bottom)) === el) ||
el.contains(at_corner);
}
finally {
var ix = 0;
ignorable.forEach(function (x) {
x.style.display = old_displays[ix];
ix++;
});
}
return ret;
""", element, ignorable) | 0.000773 |
def design(self, max_stimuli=-1, max_inhibitors=-1, max_experiments=10, relax=False, configure=None):
"""
Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has
up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the
attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`.
Example::
>>> from caspo import core, design
>>> networks = core.LogicalNetworkList.from_csv('behaviors.csv')
>>> setup = core.Setup.from_json('setup.json')
>>> designer = design.Designer(networks, setup)
>>> designer.design(3, 2)
>>> for i,d in enumerate(designer.designs):
... f = 'design-%s' % i
... d.to_csv(f, stimuli=setup.stimuli, inhibitors=setup.inhibitors)
Parameters
----------
max_stimuli : int
Maximum number of stimuli per experiment
max_inhibitors : int
Maximum number of inhibitors per experiment
max_experiments : int
Maximum number of experiments per design
relax : boolean
Whether to relax the full-pairwise networks discrimination (True) or not (False).
If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments`
configure : callable
Callable object responsible for setting the clingo configuration
"""
self.designs = []
args = ['-c maxstimuli=%s' % max_stimuli, '-c maxinhibitors=%s' % max_inhibitors, '-Wno-atom-undefined']
clingo = gringo.Control(args)
clingo.conf.solve.opt_mode = 'optN'
if configure is not None:
configure(clingo.conf)
clingo.add("base", [], self.instance)
clingo.load(self.encodings['design'])
clingo.ground([("base", [])])
if relax:
parts = [("step", [step]) for step in xrange(1, max_experiments+1)]
parts.append(("diff", [max_experiments + 1]))
clingo.ground(parts)
ret = clingo.solve(on_model=self.__save__)
else:
step, ret = 0, gringo.SolveResult.UNKNOWN
while step <= max_experiments and ret != gringo.SolveResult.SAT:
parts = []
parts.append(("check", [step]))
if step > 0:
clingo.release_external(gringo.Fun("query", [step-1]))
parts.append(("step", [step]))
clingo.cleanup_domains()
clingo.ground(parts)
clingo.assign_external(gringo.Fun("query", [step]), True)
ret, step = clingo.solve(on_model=self.__save__), step + 1
self.stats['time_optimum'] = clingo.stats['time_solve']
self.stats['time_enumeration'] = clingo.stats['time_total']
self._logger.info("%s optimal experimental designs found in %.4fs", len(self.designs), self.stats['time_enumeration']) | 0.003518 |
def device_id(self):
"""
Randomly generated deviceId.
:return:
"""
if self._device_id is None:
self._device_id = "".join(
random.choice("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") for _ in range(50))
return self._device_id | 0.009375 |
def _get_stream_metadata(self, use_cached):
"""Retrieve metadata about this stream from Device Cloud"""
if self._cached_data is None or not use_cached:
try:
self._cached_data = self._conn.get_json("/ws/DataStream/%s" % self._stream_id)["items"][0]
except DeviceCloudHttpException as http_exception:
if http_exception.response.status_code == 404:
raise NoSuchStreamException("Stream with id %r has not been created" % self._stream_id)
raise http_exception
return self._cached_data | 0.006723 |
def support(self, version):
"""
return `True` if the current Python version matches the version passed.
raise a deprecation warning if only PY2 or PY3 is supported, as you probably
have a conditional that should be removed.
"""
if not self._known_version(version):
warn("unknown feature: %s"%version)
return True
else:
if not self._get_featureset_support(version):
warn("You are not supporting %s anymore "%str(version), UserWarning, self.level)
if self._alone_version(version):
warn("%s is the last supported feature of this group, you can simplifiy this logic. "%str(version), UserWarning,self.level)
return self.predicates.get(version, True)
if (not self.PY3_supported) or (not self.PY2_supported):
warn("You are only supporting 1 version of Python", UserWarning, self.level)
if version == PY3:
return sys.version_info.major == 3
elif version == PY2:
return sys.version_info.major == 2 | 0.012522 |
def eqCoords(lon, lat):
""" Converts from ecliptical to equatorial coordinates.
This algorithm is described in book 'Primary Directions',
pp. 147-150.
"""
# Convert to radians
_lambda = math.radians(lon)
_beta = math.radians(lat)
_epson = math.radians(23.44) # Obliquity of the ecliptic (Earth's axial tilt)
# Declination in radians
decl = math.asin(math.sin(_epson) * math.sin(_lambda) * math.cos(_beta) + \
math.cos(_epson) * math.sin(_beta))
# Equatorial Distance in radians
ED = math.acos(math.cos(_lambda) * math.cos(_beta) / math.cos(decl))
# RA in radians
ra = ED if lon < 180 else math.radians(360) - ED
# Correct the RA if the longitude is within 5º of 0º or 180º
if (abs(angle.closestdistance(lon, 0)) < 5 or
abs(angle.closestdistance(lon, 180)) < 5):
a = math.sin(ra) * math.cos(decl)
b = math.cos(_epson) * math.sin(_lambda) * math.cos(_beta) - \
math.sin(_epson) * math.sin(_beta)
if (math.fabs(a-b) > 0.0003):
ra = math.radians(360) - ra
return (math.degrees(ra), math.degrees(decl)) | 0.011785 |
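A hedged spot check. The function also consults an external `angle.closestdistance` helper near 0°/180°; the namespace below is a minimal stand-in (assumed to return the signed smallest separation between two longitudes) so the call runs in isolation. A point on the ecliptic at longitude 90° lands at RA 90° with a declination equal to the 23.44° obliquity used above.

```python
import math
import types

# Minimal stand-in for the external `angle` module consulted above.
angle = types.SimpleNamespace(
    closestdistance=lambda a, b: ((b - a + 180.0) % 360.0) - 180.0)

ra, decl = eqCoords(90.0, 0.0)
print(round(ra, 2), round(decl, 2))  # 90.0 23.44
```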
def raise_stmt__30(self, raise_loc, exc_opt):
"""(3.0-) raise_stmt: 'raise' [test ['from' test]]"""
exc = from_loc = cause = None
loc = raise_loc
if exc_opt:
exc, cause_opt = exc_opt
loc = loc.join(exc.loc)
if cause_opt:
from_loc, cause = cause_opt
loc = loc.join(cause.loc)
return ast.Raise(exc=exc, inst=None, tback=None, cause=cause,
keyword_loc=raise_loc, from_loc=from_loc, loc=loc) | 0.003846 |
def change_view(self, change_in_depth):
"""Change the view depth by expand or collapsing all same-level nodes"""
self.current_view_depth += change_in_depth
if self.current_view_depth < 0:
self.current_view_depth = 0
self.collapseAll()
if self.current_view_depth > 0:
for item in self.get_items(maxlevel=self.current_view_depth-1):
item.setExpanded(True) | 0.006803 |
def _single_mp_run(x, Phi, bound, max_iter, verbose=False, pad=0,
random_state=None, memory=Memory(None)):
""" run of the RSSMP algorithm """
rng = check_random_state(random_state)
pad = int(pad)
x = np.concatenate((np.zeros(pad), x, np.zeros(pad)))
n = x.size
m = Phi.doth(x).size
err_mse = []
# Initialisation
residual = np.concatenate((x.copy(), np.zeros(max(Phi.sizes) // 2)))
s = np.zeros(m)
x_est = np.zeros(n)
# Main algorithm
coeffs = np.zeros(m)
it_number = 0
current_lambda = 1
err_mse.append(linalg.norm(residual))
# Decomposition loop: stopping criteria is either SNR or iteration number
while (current_lambda > bound) & (it_number < max_iter):
# pick a shift at random : in each size
rndshifts = []
for scale_idx, size in enumerate(Phi.sizes):
shift = rng.randint(low=0, high=size // 4)
coeffs[scale_idx * n:(scale_idx + 1) * n] = mdct(
residual[shift:shift + n], size).ravel()
rndshifts.append(shift)
# Select a new element
idx = np.argmax(np.abs(coeffs))
# Update coefficients
s[idx] += coeffs[idx]
# Only one method now : local update via a cached waveform
# find scale and frequency bin of selected atom
mdct_wf = memory.cache(mdct_waveform)
scale_idx = idx // n
size = Phi.sizes[scale_idx]
F = n // (size // 2)
frame = (idx - (scale_idx * n)) % F
freq_bin = ((idx - (scale_idx * n))) // F
pos = (frame * size // 2) - size // 4 + rndshifts[scale_idx]
residual[pos:pos + size] -= coeffs[idx] * mdct_wf(size, freq_bin)
# also add it to the reconstruction
x_est[pos:pos + size] += coeffs[idx] * mdct_wf(size, freq_bin)
# error computation (err_mse)
err_mse.append(linalg.norm(residual))
current_lambda = np.sqrt(1 - err_mse[-1] / err_mse[-2])
if current_lambda <= bound:
x_est[pos:pos + size] -= coeffs[idx] * mdct_wf(size, freq_bin)
if verbose:
print("Iteration %d : Current lambda of %1.4f" % (
it_number, current_lambda))
it_number += 1
return x_est, err_mse | 0.000439 |
def _set_or_check_remote_id(self, remote_id):
"""Set or check the remote id."""
if not self.remote_id:
assert self.closed_state == self.ClosedState.PENDING, 'Bad ClosedState!'
self.remote_id = remote_id
self.closed_state = self.ClosedState.OPEN
elif self.remote_id != remote_id:
raise usb_exceptions.AdbProtocolError(
'%s remote-id change to %s', self, remote_id) | 0.012225 |
def process_updated_files(self, paths: List[str]) -> List[str]:
"""
Return the paths in the analysis directory (symbolic links)
corresponding to the given paths.
Result also includes any files which are within a tracked directory.
This method will remove/add symbolic links for deleted/new files.
"""
tracked_files = []
deleted_paths = [path for path in paths if not os.path.isfile(path)]
# TODO(T40580762) use buck targets to properly identify what new files belong
# to the project rather than checking if they are within the current directory
new_paths = [
path
for path in paths
if path not in self._symbolic_links
and os.path.isfile(path)
and is_parent(os.getcwd(), path)
]
updated_paths = [
path
for path in paths
if path not in deleted_paths and path not in new_paths
]
if deleted_paths:
LOG.info("Detected deleted paths: `%s`.", "`,`".join(deleted_paths))
for path in deleted_paths:
link = self._symbolic_links.pop(path, None)
if link:
try:
_delete_symbolic_link(link)
tracked_files.append(link)
except OSError:
LOG.warning("Failed to delete link at `%s`.", link)
if new_paths:
LOG.info("Detected new paths: %s.", ",".join(new_paths))
try:
for path, relative_link in buck.resolve_relative_paths(
new_paths
).items():
link = os.path.join(self.get_root(), relative_link)
try:
add_symbolic_link(link, path)
self._symbolic_links[path] = link
tracked_files.append(link)
except OSError:
LOG.warning("Failed to add link at %s.", link)
except buck.BuckException as error:
LOG.error("Exception occurred when querying buck: %s", error)
LOG.error("No new paths will be added to the analysis directory.")
for path in updated_paths:
if path in self._symbolic_links:
tracked_files.append(self._symbolic_links[path])
elif self._is_tracked(path):
tracked_files.append(path)
return tracked_files | 0.002782 |
def visitStarCardinality(self, ctx: ShExDocParser.StarCardinalityContext):
""" '*' """
self.expression.min = 0
self.expression.max = -1 | 0.012579 |
def prefix_attr_add(arg, opts, shell_opts):
""" Add attributes to a prefix
"""
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print("Prefix %s not found in %s." % (arg, vrf_format(v)), file=sys.stderr)
return
p = res[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr)
sys.exit(1)
if key in p.avps:
print("Unable to add extra-attribute: '%s' already exists." % key, file=sys.stderr)
sys.exit(1)
p.avps[key] = value
try:
p.save()
except NipapError as exc:
print("Could not save prefix changes: %s" % str(exc), file=sys.stderr)
sys.exit(1)
print("Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))) | 0.005814 |
def envCheckFlag(self, name, default = False):
"""Check graph flag for enabling / disabling attributes through
the use of <name> environment variable.
@param name: Name of flag.
(Also determines the environment variable name.)
@param default: Boolean (True or False). Default value for flag.
@return: Return True if the flag is enabled.
"""
if name in self._flags:
return self._flags[name]
else:
val = self._env.get(name)
if val is None:
return default
elif val.lower() in ['yes', 'on']:
self._flags[name] = True
return True
elif val.lower() in ['no', 'off']:
self._flags[name] = False
return False
else:
raise AttributeError("Value for flag %s, must be yes, no, on or off"
% name) | 0.00892 |
def repo_id(self, repo: str) -> str:
""" Returns an unique identifier from a repo URL for the folder the repo is gonna be pulled in.
"""
if repo.startswith("http"):
repo_id = re.sub(r"https?://(.www)?", "", repo)
repo_id = re.sub(r"\.git/?$", "", repo_id)
else:
repo_id = repo.replace("file://", "")
repo_id = re.sub(r"\.git/?$", "", repo_id)
if repo_id.startswith("~"):
repo_id = str(Path(repo_id).resolve())
# replaces everything that isn't alphanumeric, a dot or an underscore
# to make sure it's a valid folder name and to keep it readable
# multiple consecutive invalid characters replaced with a single underscore
repo_id = re.sub(r"[^a-zA-Z0-9._]+", "_", repo_id)
# and add a hash of the original to make it absolutely unique
return repo_id + hashlib.sha256(repo.encode("utf-8")).hexdigest() | 0.004184 |
def makepilimage(self, scale = "log", negative = False):
"""
Makes a PIL image out of the array, respecting the z1 and z2 cutoffs.
By default we use a log scaling identical to iraf's, and produce an image of mode "L", i.e. grayscale.
But some drawings or colourscales will change the mode to "RGB" later, if you choose your own colours.
If you choose scale = "clog" or "clin", you get hue values (aka rainbow colours).
"""
if scale == "log" or scale == "lin":
self.negative = negative
numpyarrayshape = self.numpyarray.shape
calcarray = self.numpyarray.copy()
#calcarray.ravel() # does not change in place in fact !
calcarray = calcarray.clip(min = self.z1, max = self.z2)
if scale == "log":
calcarray = np.array(map(lambda x: loggray(x, self.z1, self.z2), calcarray))
else :
calcarray = np.array(map(lambda x: lingray(x, self.z1, self.z2), calcarray))
calcarray.shape = numpyarrayshape
bwarray = np.zeros(numpyarrayshape, dtype=np.uint8)
calcarray.round(out=bwarray)
if negative:
if self.verbose:
print "Using negative scale"
bwarray = 255 - bwarray
if self.verbose:
print "PIL range : [%i, %i]" % (np.min(bwarray), np.max(bwarray))
# We flip it so that (0, 0) is back in the bottom left corner as in ds9
# We do this here, so that you can write on the image from left to right :-)
self.pilimage = imop.flip(im.fromarray(bwarray.transpose()))
if self.verbose:
print "PIL image made with scale : %s" % scale
return 0
if scale == "clog" or scale == "clin":
"""
rainbow !
Algorithm for HSV to RGB from http://www.cs.rit.edu/~ncs/color/t_convert.html, by Eugene Vishnevsky
The same approach as used for f2n in C
h is from 0 to 360 (hue)
s from 0 to 1 (saturation)
v from 0 to 1 (brightness)
"""
self.negative = False
calcarray = self.numpyarray.transpose()
if scale == "clin":
calcarray = (calcarray.clip(min = self.z1, max = self.z2)-self.z1)/(self.z2 - self.z1) # 0 to 1
if scale == "clog":
calcarray = 10.0 + 990.0 * (calcarray.clip(min = self.z1, max = self.z2)-self.z1)/(self.z2 - self.z1) # 10 to 1000
calcarray = (np.log10(calcarray)-1.0)*0.5 # 0 to 1
#calcarray = calcarray * 359.0 # This is now our "hue value", 0 to 360
calcarray = (1.0-calcarray) * 300.0 # I limit this to not go into red again
# The order of colours is Violet < Blue < Green < Yellow < Red
# We prepare the output arrays
rcalcarray = np.ones(calcarray.shape)
gcalcarray = np.ones(calcarray.shape)
bcalcarray = np.ones(calcarray.shape)
h = calcarray/60.0 # sector 0 to 5
i = np.floor(h).astype(np.int)
v = 1.0 * np.ones(calcarray.shape)
s = 1.0 * np.ones(calcarray.shape)
f = h - i # factorial part of h, this is an array
p = v * ( 1.0 - s )
q = v * ( 1.0 - s * f )
t = v * ( 1.0 - s * ( 1.0 - f ) )
# sector 0:
indices = (i == 0)
rcalcarray[indices] = 255.0 * v[indices]
gcalcarray[indices] = 255.0 * t[indices]
bcalcarray[indices] = 255.0 * p[indices]
# sector 1:
indices = (i == 1)
rcalcarray[indices] = 255.0 * q[indices]
gcalcarray[indices] = 255.0 * v[indices]
bcalcarray[indices] = 255.0 * p[indices]
# sector 2:
indices = (i == 2)
rcalcarray[indices] = 255.0 * p[indices]
gcalcarray[indices] = 255.0 * v[indices]
bcalcarray[indices] = 255.0 * t[indices]
# sector 3:
indices = (i == 3)
rcalcarray[indices] = 255.0 * p[indices]
gcalcarray[indices] = 255.0 * q[indices]
bcalcarray[indices] = 255.0 * v[indices]
# sector 4:
indices = (i == 4)
rcalcarray[indices] = 255.0 * t[indices]
gcalcarray[indices] = 255.0 * p[indices]
bcalcarray[indices] = 255.0 * v[indices]
# sector 5:
indices = (i == 5)
rcalcarray[indices] = 255.0 * v[indices]
gcalcarray[indices] = 255.0 * p[indices]
bcalcarray[indices] = 255.0 * q[indices]
rarray = np.zeros(calcarray.shape, dtype=np.uint8)
garray = np.zeros(calcarray.shape, dtype=np.uint8)
barray = np.zeros(calcarray.shape, dtype=np.uint8)
rcalcarray.round(out=rarray)
gcalcarray.round(out=garray)
bcalcarray.round(out=barray)
carray = np.dstack((rarray,garray,barray))
self.pilimage = imop.flip(im.fromarray(carray, "RGB"))
if self.verbose:
print "PIL image made with scale : %s" % scale
return 0
raise RuntimeError, "I don't know your colourscale, choose lin log clin or clog !" | 0.014836 |
def create(self, properties):
"""
Create a :term:`Metrics Context` resource in the HMC this client is
connected to.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create Metrics Context' in the :term:`HMC API` book.
Returns:
:class:`~zhmcclient.MetricsContext`:
The resource object for the new :term:`Metrics Context` resource.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result = self.session.post('/api/services/metrics/context',
body=properties)
mc_properties = properties.copy()
mc_properties.update(result)
new_metrics_context = MetricsContext(self,
result['metrics-context-uri'],
None,
mc_properties)
self._metrics_contexts.append(new_metrics_context)
return new_metrics_context | 0.001613 |
def to_string(self, endpoints):
# type: (List[EndpointDescription]) -> str
"""
Converts the given endpoint description beans into a string
:param endpoints: A list of EndpointDescription beans
:return: A string containing an XML document
"""
# Make the ElementTree
root = self._make_xml(endpoints)
tree = ElementTree.ElementTree(root)
# Force the default name space
ElementTree.register_namespace("", EDEF_NAMESPACE)
# Make the XML
# Prepare a StringIO output
output = StringIO()
# Try to write with a correct encoding
tree.write(
output,
encoding=self._encoding,
xml_declaration=self._xml_declaration,
)
return output.getvalue().strip() | 0.00365 |
def format_config_read_queue(self,
use_color: bool = False,
max_col_width: int = 50) -> str:
"""
Prepares a string with pretty printed config read queue.
:param use_color: use terminal colors
:param max_col_width: limit column width, ``50`` by default
:return:
"""
try:
from terminaltables import SingleTable
except ImportError:
import warnings
warnings.warn('Cannot display config read queue. Install terminaltables first.')
return ''
col_names_order = ['path', 'value', 'type', 'parser']
pretty_bundles = [[self._colorize(name, name.capitalize(), use_color=use_color)
for name in col_names_order]]
for config_read_item in self.config_read_queue:
pretty_attrs = [
config_read_item.variable_path,
config_read_item.value,
config_read_item.type,
config_read_item.parser_name
]
pretty_attrs = [self._pformat(pa, max_col_width) for pa in pretty_attrs]
if config_read_item.is_default:
pretty_attrs[0] = '*' + pretty_attrs[0]
if use_color:
pretty_attrs = [self._colorize(column_name, pretty_attr, use_color=use_color)
for column_name, pretty_attr in zip(col_names_order, pretty_attrs)]
pretty_bundles.append(pretty_attrs)
table = SingleTable(pretty_bundles)
table.title = self._colorize('title', 'CONFIG READ QUEUE', use_color=use_color)
table.justify_columns[0] = 'right'
# table.inner_row_border = True
return str(table.table) | 0.005565 |
def _parallel_extracter(data_dir, number_of_test, number_of_dev, total, counter):
"""Generate a function to extract a tar file based on given parameters
This works by currying the above given arguments into a closure
in the form of the following function.
:param data_dir: the target directory to extract into
:param number_of_test: the number of files to keep as the test set
:param number_of_dev: the number of files to keep as the dev set
:param total: the total number of files to extract
:param counter: an atomic counter to keep track of # of extracted files
:return: a function that actually extracts a tar file given these params
"""
def extract(d):
"""Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope
Extracts the given file
:param d: a tuple consisting of (index, file) where index is the index
of the file to extract and file is the name of the file to extract
"""
(i, archive) = d
if i < number_of_test:
dataset_dir = path.join(data_dir, "test")
elif i<number_of_test+number_of_dev:
dataset_dir = path.join(data_dir, "dev")
else:
dataset_dir = path.join(data_dir, "train")
if not gfile.Exists(path.join(dataset_dir, '.'.join(filename_of(archive).split(".")[:-1]))):
c = counter.increment()
print('Extracting file {} ({}/{})...'.format(i+1, c, total))
tar = tarfile.open(archive)
tar.extractall(dataset_dir)
tar.close()
return extract | 0.004848 |
def uri_read(*args, **kwargs):
"""
Reads the contents of a URI into a string or bytestring.
See :func:`uri_open` for complete description of keyword parameters.
:returns: Contents of URI
:rtype: str, bytes
"""
with uri_open(*args, **kwargs) as f:
content = f.read()
return content | 0.003106 |
def parse_node(self, node):
"""
Parses the specified child task node, and returns the task spec. This
can be called by a TaskParser instance, that is owned by this
ProcessParser.
"""
if node.get('id') in self.parsed_nodes:
return self.parsed_nodes[node.get('id')]
(node_parser, spec_class) = self.parser._get_parser_class(node.tag)
if not node_parser or not spec_class:
raise ValidationException(
"There is no support implemented for this task type.",
node=node, filename=self.filename)
np = node_parser(self, spec_class, node)
task_spec = np.parse_node()
return task_spec | 0.002782 |
def vote(self, candidates):
"""Rank artifact candidates.
The voting is needed for the agents living in societies using
social decision making. The function should return a sorted list
of (candidate, evaluation)-tuples. Depending on the social choice
function used, the evaluation might be omitted from the actual decision
making, or only a number of (the highest ranking) candidates may be
used.
This basic implementation ranks candidates based on
:meth:`~creamas.core.agent.CreativeAgent.evaluate`.
:param candidates:
list of :py:class:`~creamas.core.artifact.Artifact` objects to be
ranked
:returns:
Ordered list of (candidate, evaluation)-tuples
"""
ranks = [(c, self.evaluate(c)[0]) for c in candidates]
ranks.sort(key=operator.itemgetter(1), reverse=True)
return ranks | 0.002144 |
def refreshCompositeOf(self, single_keywords, composite_keywords,
store=None, namespace=None):
"""Re-check sub-parts of this keyword.
This should be called after the whole RDF has been processed, because
it uses a cache of single keywords; if that cache is incomplete,
not all component parts will be identified.
"""
def _get_ckw_components(new_vals, label):
if label in single_keywords:
new_vals.append(single_keywords[label])
elif ('Composite.%s' % label) in composite_keywords:
for l in composite_keywords['Composite.{0}'.format(label)].compositeof: # noqa
_get_ckw_components(new_vals, l)
elif label in composite_keywords:
for l in composite_keywords[label].compositeof:
_get_ckw_components(new_vals, l)
else:
# One single or composite keyword is missing from the taxonomy.
# This is due to an error in the taxonomy description.
message = "The composite term \"%s\""\
" should be made of single keywords,"\
" but at least one is missing." % self.id
if store is not None:
message += "Needed components: %s"\
% list(store.objects(self.id,
namespace["compositeOf"]))
message += " Missing is: %s" % label
raise TaxonomyError(message)
if self.compositeof:
new_vals = []
try:
for label in self.compositeof:
_get_ckw_components(new_vals, label)
self.compositeof = new_vals
except TaxonomyError as err:
# the composites will be empty
# (better than to have confusing, partial matches)
self.compositeof = []
current_app.logger.error(err) | 0.002444 |
def unsubscribe(self, event, handler):
"""
Unsubscribes the Handler from the given Event.
Both synchronous and asynchronous handlers are removed.
@param event: (str|see.Event) event to which the handler is subscribed.
@param handler: (callable) function or method to unsubscribe.
"""
try:
self._handlers.sync_handlers[event].remove(handler)
except ValueError:
self._handlers.async_handlers[event].remove(handler)
else:
try:
self._handlers.async_handlers[event].remove(handler)
except ValueError:
pass | 0.003063 |
def _F_hyperedge_head_cardinality(H, F):
"""Returns the result of a function F applied to the set of cardinalities
of hyperedge heads in the hypergraph.
:param H: the hypergraph whose head cardinalities will be
operated on.
:param F: function to execute on the set of cardinalities in the
hypergraph.
:returns: result of the given function F.
:raises: TypeError -- Algorithm only applicable to directed hypergraphs
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Algorithm only applicable to directed hypergraphs")
return F([len(H.get_hyperedge_head(hyperedge_id))
for hyperedge_id in H.get_hyperedge_id_set()]) | 0.001393 |
def check_time_event(oqparam, occupancy_periods):
"""
Check the `time_event` parameter in the datastore, by comparing
with the periods found in the exposure.
"""
time_event = oqparam.time_event
if time_event and time_event not in occupancy_periods:
raise ValueError(
'time_event is %s in %s, but the exposure contains %s' %
(time_event, oqparam.inputs['job_ini'],
', '.join(occupancy_periods))) | 0.00216 |
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op | 0.011066 |
def lf_overlaps(L, normalize_by_coverage=False):
"""Return the **fraction of items each LF labels that are also labeled by at
least one other LF.**
Note that the maximum possible overlap fraction for an LF is the LF's
coverage, unless `normalize_by_coverage=True`, in which case it is 1.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
normalize_by_coverage: Normalize by coverage of the LF, so that it
returns the percent of LF labels that have overlaps.
"""
overlaps = (L != 0).T @ _overlapped_data_points(L) / L.shape[0]
if normalize_by_coverage:
overlaps /= lf_coverages(L)
return np.nan_to_num(overlaps) | 0.002656 |
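A self-contained sketch of what the overlap computation does, under the assumption that `_overlapped_data_points(L)` returns a per-item indicator for rows labeled by at least two LFs (that helper is not shown in this snippet):
import numpy as np
from scipy import sparse

L = sparse.csr_matrix([[1, 0], [1, 1], [0, 1]])                  # 3 items, 2 LFs
labeled = (L != 0).astype(float)                                 # 0/1 label-indicator matrix
overlapped = (np.ravel(labeled.sum(axis=1)) >= 2).astype(float)  # items with >= 2 labels
overlaps = labeled.T @ overlapped / L.shape[0]                   # per-LF overlap fraction
# overlaps -> array([0.333..., 0.333...]): each LF overlaps only on the middle item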
def read_uint(data, start, length):
"""Extract a uint from a position in a sequence."""
return int.from_bytes(data[start:start+length], byteorder='big') | 0.00625 |
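For illustration, the slicing-plus-`int.from_bytes` pattern above behaves like this on a dummy byte string:
data = b'\x00\x01\x02\x03'
assert int.from_bytes(data[1:1 + 2], byteorder='big') == 0x0102   # equivalent to read_uint(data, 1, 2)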
def remote_sys_desc_uneq_store(self, remote_system_desc):
"""This function saves the system desc, if different from stored. """
if remote_system_desc != self.remote_system_desc:
self.remote_system_desc = remote_system_desc
return True
return False | 0.00678 |
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path | 0.001299 |
def sample_forecast_max_hail(self, dist_model_name, condition_model_name,
num_samples, condition_threshold=0.5, query=None):
"""
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.
Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals
num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum
value within each area sample is used.
Args:
dist_model_name: Name of the distribution machine learning model being evaluated
condition_model_name: Name of the hail/no-hail model being evaluated
num_samples: Number of maximum hail samples to draw
condition_threshold: Threshold for drawing hail samples
query: A str that selects a subset of the data for evaluation
Returns:
A numpy array containing maximum hail samples for each forecast object.
"""
if query is not None:
dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
dist_forecasts = dist_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
dist_forecasts = self.matched_forecasts["dist"][dist_model_name]
condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
areas = dist_forecasts["Area"].values
for f in np.arange(dist_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
if condition_prob >= condition_threshold:
max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values,
size=(num_samples, areas[f])).max(axis=1))
return max_hail_samples | 0.00736 |
def pre_delete_title(instance, **kwargs):
''' Update article.languages
'''
if instance.article.languages:
languages = instance.article.languages.split(',')
else:
languages = []
if instance.language in languages:
languages.remove(instance.language)
instance.article.languages = ','.join(languages)
instance.article._publisher_keep_state = True
instance.article.save(no_signals=True) | 0.002227 |
def camera_action_raw(self, world_pos):
"""Return a `sc_pb.Action` with the camera movement filled."""
action = sc_pb.Action()
world_pos.assign_to(action.action_raw.camera_move.center_world_space)
return action | 0.004425 |
def cmd_connect(node, cmd_name, node_info):
"""Connect to node."""
# FUTURE: call function to check for custom connection-info
conn_info = "Defaults"
conf_mess = ("\r{0}{1} TO{2} {3} using {5}{4}{2} - Confirm [y/N]: ".
format(C_STAT[cmd_name.upper()], cmd_name.upper(), C_NORM,
node_info, conn_info, C_HEAD2))
cmd_result = None
if input_yn(conf_mess):
exec_mess = ("\r{0}CONNECTING TO{1} {2} using {4}{3}{1}: ".
format(C_STAT[cmd_name.upper()], C_NORM, node_info,
conn_info, C_HEAD2))
ui_erase_ln()
ui_print(exec_mess)
(ssh_user, ssh_key) = ssh_get_info(node)
if ssh_user:
ssh_cmd = "ssh {0}{1}@{2}".format(ssh_key, ssh_user,
node.public_ips)
else:
ssh_cmd = "ssh {0}{1}".format(ssh_key, node.public_ips)
print("\n")
ui_print("\033[?25h") # cursor on
subprocess.call(ssh_cmd, shell=True)
ui_print("\033[?25l") # cursor off
print()
cmd_result = True
else:
ui_print_suffix("Command Aborted")
sleep(0.75)
return cmd_result | 0.000816 |
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params) | 0.003782 |
def add_macro(self,name,value):
"""
Add a variable (macro) for this node. This can be different for
each node in the DAG, even if they use the same CondorJob. Within
the CondorJob, the value of the macro can be referenced as
'$(name)' -- for instance, to define a unique output or error file
for each node.
@param name: macro name.
@param value: value of the macro for this node in the DAG
"""
macro = self.__bad_macro_chars.sub( r'', name )
self.__opts[macro] = value | 0.009728 |
def should_cache(self, request, response):
""" Given the request and response should it be cached """
if not getattr(request, '_cache_update_cache', False):
return False
if not response.status_code in getattr(settings, 'BETTERCACHE_CACHEABLE_STATUS', CACHEABLE_STATUS):
return False
if getattr(settings, 'BETTERCACHE_ANONYMOUS_ONLY', False) and self.session_accessed and request.user.is_authenticated:
return False
if self.has_uncacheable_headers(response):
return False
return True | 0.008636 |
def NRTL(xs, taus, alphas):
r'''Calculates the activity coefficients of each species in a mixture
using the Non-Random Two-Liquid (NRTL) method, given their mole fractions,
dimensionless interaction parameters, and nonrandomness constants. Those
are normally correlated with temperature in some form, and need to be
calculated separately.
.. math::
\ln(\gamma_i)=\frac{\displaystyle\sum_{j=1}^{n}{x_{j}\tau_{ji}G_{ji}}}
{\displaystyle\sum_{k=1}^{n}{x_{k}G_{ki}}}+\sum_{j=1}^{n}
{\frac{x_{j}G_{ij}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}
{\left ({\tau_{ij}-\frac{\displaystyle\sum_{m=1}^{n}{x_{m}\tau_{mj}
G_{mj}}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}\right )}
G_{ij}=\text{exp}\left ({-\alpha_{ij}\tau_{ij}}\right )
Parameters
----------
xs : list[float]
Liquid mole fractions of each species, [-]
taus : list[list[float]]
Dimensionless interaction parameters of each compound with each other,
[-]
alphas : list[list[float]]
Nonrandomness constants of each compound interacting with each other, [-]
Returns
-------
gammas : list[float]
Activity coefficient for each species in the liquid mixture, [-]
Notes
-----
This model needs N^2 parameters.
One common temperature dependence of the nonrandomness constants is:
.. math::
\alpha_{ij}=c_{ij}+d_{ij}T
Most correlations for the interaction parameters include some of the terms
shown in the following form:
.. math::
\tau_{ij}=A_{ij}+\frac{B_{ij}}{T}+\frac{C_{ij}}{T^{2}}+D_{ij}
\ln{\left ({T}\right )}+E_{ij}T^{F_{ij}}
Examples
--------
Ethanol-water example, at 343.15 K and 1 MPa:
>>> NRTL(xs=[0.252, 0.748], taus=[[0, -0.178], [1.963, 0]],
... alphas=[[0, 0.2974],[.2974, 0]])
[1.9363183763514304, 1.1537609663170014]
References
----------
.. [1] Renon, Henri, and J. M. Prausnitz. "Local Compositions in
Thermodynamic Excess Functions for Liquid Mixtures." AIChE Journal 14,
no. 1 (1968): 135-144. doi:10.1002/aic.690140124.
.. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.
Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:
Wiley-VCH, 2012.
'''
gammas = []
cmps = range(len(xs))
Gs = [[exp(-alphas[i][j]*taus[i][j]) for j in cmps] for i in cmps]
for i in cmps:
tn1, td1, total2 = 0., 0., 0.
for j in cmps:
# Term 1, numerator and denominator
tn1 += xs[j]*taus[j][i]*Gs[j][i]
td1 += xs[j]*Gs[j][i]
# Term 2
tn2 = xs[j]*Gs[i][j]
td2 = td3 = sum([xs[k]*Gs[k][j] for k in cmps])
tn3 = sum([xs[m]*taus[m][j]*Gs[m][j] for m in cmps])
total2 += tn2/td2*(taus[i][j] - tn3/td3)
gamma = exp(tn1/td1 + total2)
gammas.append(gamma)
return gammas | 0.001008 |
def system_describe_projects(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/describeProjects API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeProjects
"""
return DXHTTPRequest('/system/describeProjects', input_params, always_retry=always_retry, **kwargs) | 0.007979 |
def get_date_format_string(period):
"""
For a given period (e.g. 'month', 'day', or some numeric interval
such as 3600 (in secs)), return the format string that can be
used with strftime to format that time to specify the times
across that interval, but no more detailed.
For example,
>>> get_date_format_string('month')
'%Y-%m'
>>> get_date_format_string(3600)
'%Y-%m-%d %H'
>>> get_date_format_string('hour')
'%Y-%m-%d %H'
>>> get_date_format_string(None)
Traceback (most recent call last):
...
TypeError: period must be a string or integer
>>> get_date_format_string('garbage')
Traceback (most recent call last):
...
ValueError: period not in (second, minute, hour, day, month, year)
"""
# handle the special case of 'month' which doesn't have
# a static interval in seconds
if isinstance(period, six.string_types) and period.lower() == 'month':
return '%Y-%m'
file_period_secs = get_period_seconds(period)
format_pieces = ('%Y', '-%m-%d', ' %H', '-%M', '-%S')
seconds_per_second = 1
intervals = (
seconds_per_year,
seconds_per_day,
seconds_per_hour,
seconds_per_minute,
seconds_per_second,
)
mods = list(map(lambda interval: file_period_secs % interval, intervals))
format_pieces = format_pieces[: mods.index(0) + 1]
return ''.join(format_pieces) | 0.030046 |
def fmt(self, value):
""" Sets self.fmt, with some extra help for plain format strings. """
if isinstance(value, str):
value = value.split(self.join_str)
if not (value and isinstance(value, (list, tuple))):
raise TypeError(
' '.join((
'Expecting str or list/tuple of formats {!r}.',
'Got: ({}) {!r}'
)).format(
self.default_format,
type(value).__name__,
value,
))
self._fmt = value | 0.003419 |
def language_selector(context):
""" displays a language selector dropdown in the admin, based on Django "LANGUAGES" context.
requires:
* USE_I18N = True / settings.py
* LANGUAGES specified / settings.py (otherwise all Django locales will be displayed)
* "set_language" url configured (see https://docs.djangoproject.com/en/dev/topics/i18n/translation/#the-set-language-redirect-view)
"""
output = ""
i18 = getattr(settings, 'USE_I18N', False)
if i18:
template = "admin/language_selector.html"
context['i18n_is_set'] = True
try:
output = render_to_string(template, context)
except:
pass
return output | 0.006916 |
def copyright_model_factory(*, validator=validators.is_copyright_model,
**kwargs):
"""Generate a Copyright model.
Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
as keyword arguments.
Raises:
:exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword
argument is given.
"""
kwargs['ld_type'] = 'Copyright'
return _model_factory(validator=validator, **kwargs) | 0.002193 |
def rotate_direction(hexgrid_type, direction, ccw=True):
"""
Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.
:param direction: string, eg 'NW', 'N', 'SE'
:param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
:return: the rotated direction string, eg 'SW', 'NW', 'S'
"""
if hexgrid_type in [TILE, EDGE]:
directions = ['NW', 'W', 'SW', 'SE', 'E', 'NE', 'NW'] if ccw \
else ['NW', 'NE', 'E', 'SE', 'SW', 'W', 'NW']
return directions[directions.index(direction) + 1]
elif hexgrid_type in [NODE]:
directions = ['N', 'NW', 'SW', 'S', 'SE', 'NE', 'N'] if ccw \
else ['N', 'NE', 'SE', 'S', 'SW', 'NW', 'N']
return directions[directions.index(direction) + 1]
else:
raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type)) | 0.004242 |
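A hedged usage sketch, assuming the module-level constants TILE and NODE referenced above are importable from the same module:
rotate_direction(TILE, 'NW', ccw=True)    # -> 'W'  (one counter-clockwise tick for a tile/edge)
rotate_direction(NODE, 'N', ccw=False)    # -> 'NE' (one clockwise tick for a node)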
def _piped_realign_gatk(data, region, cl, out_base_file, tmp_dir, prep_params):
"""Perform realignment with GATK, using input commandline.
GATK requires writing to disk and indexing before realignment.
"""
broad_runner = broad.runner_from_config(data["config"])
pa_bam = "%s-prealign%s" % os.path.splitext(out_base_file)
if not utils.file_exists(pa_bam):
with file_transaction(data, pa_bam) as tx_out_file:
cmd = "{cl} -o {tx_out_file}".format(**locals())
do.run(cmd, "GATK re-alignment {0}".format(region), data)
bam.index(pa_bam, data["config"])
realn_file = realign.gatk_realigner_targets(broad_runner, pa_bam, dd.get_ref_file(data), data["config"],
region=region_to_gatk(region),
known_vrns=dd.get_variation_resources(data))
realn_cl = realign.gatk_indel_realignment_cl(broad_runner, pa_bam, dd.get_ref_file(data),
realn_file, tmp_dir, region=region_to_gatk(region),
known_vrns=dd.get_variation_resources(data))
return pa_bam, realn_cl | 0.004979 |
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
"""
Remove a GPG from a blockchain ID's global account.
Do NOT remove it from the local keyring.
Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
Return {'error': ...} on error
"""
res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
removed_accounts = res['removed']
errors = []
# blow away all state
for account in removed_accounts:
if not account.has_key('contentUrl'):
continue
key_url = account['contentUrl']
if key_url.startswith("blockstack://"):
# delete
try:
res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
errors.append({'key_url': key_url, 'message': res['error']})
except AssertionError, e:
log.exception(e)
log.error("Failed to delete '%s'" % key_url)
raise
except Exception, e:
log.exception(e)
log.error("Failed to delete '%s'" % key_url)
continue
ret = {'status': True}
if len(errors) > 0:
ret['delete_errors'] = errors
return ret | 0.011855 |
def datagetter(cls):
""" example datagetter function, make any local modifications here """
with open('myfile', 'rt') as f:
rows = [r for r in csv.reader(f)]
dothing = lambda _: [i for i, v in enumerate(_)]
rows = [dothing(_) for _ in rows]
raise NotImplementedError('You need to implement this yourlself!')
return rows | 0.007916 |
def empty_bar_plot(ax):
''' Delete all axis ticks and labels '''
plt.sca(ax)
plt.setp(plt.gca(),xticks=[],xticklabels=[])
return ax | 0.027027 |
def finalize(self):
"""
finalize simulation for consumer
"""
# todo sort self.result by path_num
if self.result:
self.result = sorted(self.result, key=lambda x: x[0])
p, r = map(list, zip(*self.result))
self.result = r | 0.006803 |
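The sort-then-unzip idiom in the method above, shown with dummy (path_num, value) pairs:
result = [(2, 'b'), (0, 'a'), (1, 'c')]
result = sorted(result, key=lambda x: x[0])      # order by path number
p, r = map(list, zip(*result))                   # p -> [0, 1, 2], r -> ['a', 'c', 'b']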
def pfm_to_pwm(self, pfm, pseudo=0.001):
"""Convert PFM with counts to a PFM with fractions.
Parameters
----------
pfm : list
2-dimensional list with counts.
pseudo : float
Pseudocount used in conversion.
Returns
-------
pwm : list
2-dimensional list with fractions.
"""
return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm] | 0.008299 |
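The count-to-fraction conversion above, applied standalone to a toy position frequency matrix (same expression, just outside the class):
import numpy as np

pfm = [[8, 2, 0, 0], [0, 10, 0, 0]]    # counts per position (A, C, G, T)
pseudo = 0.001
pwm = [[(x + pseudo) / (float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm]
# every row of pwm sums to 1.0 and contains no hard zeros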
def to_cursor_ref(self):
"""Returns dict of values to uniquely reference this item"""
fields = self._meta.get_primary_keys()
assert fields
values = {field.name:self.__data__[field.name] for field in fields}
return values | 0.011538 |
def cluster_health_for_shards(self, index=None, params={}, **kwargs):
"""
        Return the cluster health of the specified indices (all indices by default),
        with shard information appended for each index.
        The first element of the result is a dictionary with global cluster information;
        the second describes each index and its shards, and each element is a dictionary
        such as [{'index' : 'a', 'status' : 'yellow', ..., 'shards' : {'0' : {...}, '1' : {...}, ...}}, ...]
"""
params['level'] = 'shards'
result = self.cluster_health(index, params, **kwargs)
return self._process_cluster_health_info(result) | 0.007364 |
def xmlstring(self, pretty_print=False):
"""Serialises this FoLiA element and all its contents to XML.
Returns:
str: a string with XML representation for this element and all its children"""
s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
if sys.version < '3':
if isinstance(s, str):
s = unicode(s,'utf-8') #pylint: disable=undefined-variable
else:
if isinstance(s,bytes):
s = str(s,'utf-8')
s = s.replace('ns0:','') #ugly patch to get rid of namespace prefix
s = s.replace(':ns0','')
return s | 0.019006 |
def get_reporters(self):
"""
Converts the report_generators list to a dictionary, and caches the result.
:return: A dictionary with such references.
"""
if not hasattr(self, '_report_generators_by_key'):
self._report_generators_by_key = {r.key: r for r in self.report_generators}
return self._report_generators_by_key | 0.010582 |
def clean(self):
"""
check the content of each field
:return:
"""
cleaned_data = super(UserServiceForm, self).clean()
sa = ServicesActivated.objects.get(name=self.initial['name'])
# set the name of the service, related to ServicesActivated model
cleaned_data['name'] = sa
if sa.auth_required and sa.self_hosted:
if cleaned_data.get('host') == '' or \
cleaned_data.get('username') == '' or \
cleaned_data.get('password') == '' or \
cleaned_data.get('client_id') == '' or \
cleaned_data.get('client_secret') == '':
self.add_error('username', 'All the five fields are altogether mandatory')
elif cleaned_data.get('host') is None:
self.add_error('host', 'Check its protocol and its name')
elif cleaned_data.get('host').endswith('/'):
cleaned_data['host'] = cleaned_data['host'][:-1] | 0.003018 |
def add_callback(obj, callback, args=()):
"""Add a callback to an object."""
callbacks = obj._callbacks
node = Node(callback, args)
# Store a single callback directly in _callbacks
if callbacks is None:
obj._callbacks = node
return node
# Otherwise use a dllist.
if not isinstance(callbacks, dllist):
obj._callbacks = dllist()
obj._callbacks.insert(callbacks)
callbacks = obj._callbacks
callbacks.insert(node)
return node | 0.002012 |