text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def modifyInPlace(self, *, sort=None, purge=False, done=None):
"""Like Model.modify, but changes existing database instead of
returning a new one."""
self.data = self.modify(sort=sort, purge=purge, done=done) | 0.008621 |
def execute(self, *args, **kwargs):
"""
Fill all variables from *args and **kwargs, build the request,
and send it. If we set the _verbose kwarg to true, then we'll
get a Response object back instead of loaded data.
"""
_verbose = kwargs.pop('_verbose', False)
return_full_object = kwargs.pop('return_full_object', False)
variables = self.variables().fill(*args, **kwargs)
return Request(self, variables).send(
traversal=self.traversal,
_verbose=_verbose,
return_full_object=return_full_object,
_timeout=self.timeout
) | 0.003096 |
def itemData(self, f=None, savePath=None):
""" returns data for an item on agol/portal
Inputs:
        f - output format, either 'zip' or 'json'
savePath - location to save the file
Output:
either JSON/text or filepath
"""
params = {
}
if f is not None and \
f.lower() in ['zip', 'json']:
params['f'] = f
url = "%s/data" % self.root
if self.type in ["Shapefile", "CityEngine Web Scene", "Web Scene", "KML",
"Code Sample",
"Code Attachment", "Operations Dashboard Add In",
"CSV", "CSV Collection", "CAD Drawing", "Service Definition",
"Microsoft Word", "Microsoft Powerpoint",
"Microsoft Excel", "PDF", "Image",
"Visio Document", "iWork Keynote", "iWork Pages",
"iWork Numbers", "Map Document", "Map Package",
"Basemap Package", "Tile Package", "Project Package",
"Task File", "ArcPad Package", "Explorer Map",
"Globe Document", "Scene Document", "Published Map",
"Map Template", "Windows Mobile Package", "Pro Map",
"Layout", "Layer", "Layer Package", "File Geodatabase",
"Explorer Layer", "Geoprocessing Package", "Geoprocessing Sample",
"Locator Package", "Rule Package", "Workflow Manager Package",
"Desktop Application", "Desktop Application Template",
"Code Sample", "Desktop Add In", "Explorer Add In",
"ArcGIS Desktop Add-In", "ArcGIS Explorer Add-In",
"ArcGIS Explorer application configuration", "ArcGIS Explorer document",
]:
if savePath is None:
                raise AttributeError('savePath must be provided for an item of type: %s' % self.type)
            if not os.path.isdir(savePath):
os.makedirs(savePath)
result = self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
out_folder=savePath,
file_name=self.name)
return result
else:
results = self._get(url, params,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url)
return results | 0.003877 |
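A hypothetical usage sketch for itemData; `item` stands for an already-retrieved portal item object and the paths are illustrative, not part of the original API.

# For file-type items (e.g. a "CSV") the payload is downloaded to savePath;
# for other item types the raw JSON/text is returned instead.
saved = item.itemData(f='json', savePath='/tmp/agol_downloads')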
def get_access_info(self, unscoped_auth):
"""Get the access info object
We attempt to get the auth ref. If it fails and if the K2K auth plugin
was being used then we will prepend a message saying that the error was
on the service provider side.
        :param unscoped_auth: Keystone auth plugin for unscoped user
:returns: keystoneclient.access.AccessInfo object
"""
try:
unscoped_auth_ref = base.BasePlugin.get_access_info(
self, unscoped_auth)
except exceptions.KeystoneAuthException as excp:
msg = _('Service provider authentication failed. %s')
raise exceptions.KeystoneAuthException(msg % str(excp))
return unscoped_auth_ref | 0.002646 |
def load_jupyter_server_extension(app): # pragma: no cover
"""Use Jupytext's contents manager"""
if isinstance(app.contents_manager_class, TextFileContentsManager):
app.log.info("[Jupytext Server Extension] NotebookApp.contents_manager_class is "
"(a subclass of) jupytext.TextFileContentsManager already - OK")
return
# The server extension call is too late!
# The contents manager was set at NotebookApp.init_configurables
# Let's change the contents manager class
app.log.info('[Jupytext Server Extension] Changing NotebookApp.contents_manager_class '
'from {} to jupytext.TextFileContentsManager'.format(app.contents_manager_class.__name__))
app.contents_manager_class = TextFileContentsManager
try:
# And rerun selected init steps from https://github.com/jupyter/notebook/blob/
# 132f27306522b32fa667a6b208034cb7a04025c9/notebook/notebookapp.py#L1634-L1638
# app.init_configurables()
app.contents_manager = app.contents_manager_class(parent=app, log=app.log)
app.session_manager.contents_manager = app.contents_manager
# app.init_components()
# app.init_webapp()
app.web_app.settings['contents_manager'] = app.contents_manager
# app.init_terminals()
# app.init_signal()
except Exception:
        app.log.error("""[Jupytext Server Extension] An error occurred. Please deactivate the server extension with
jupyter serverextension disable jupytext
and configure the contents manager manually by adding
c.NotebookApp.contents_manager_class = "jupytext.TextFileContentsManager"
to your .jupyter/jupyter_notebook_config.py file.
""")
raise | 0.004622 |
def _parse_file_usage(cls, action_class, args):
"""Find all external files referenced by an action."""
fixed_files = {}
variable_files = []
if not hasattr(action_class, 'FILES'):
return fixed_files, variable_files
for file_arg in action_class.FILES:
arg_value = args.get(file_arg)
if arg_value is None:
raise RecipeFileInvalid("Action lists a file argument but none was given", declared_argument=file_arg, passed_arguments=args)
variables = _extract_variables(arg_value)
if len(variables) == 0:
fixed_files[file_arg] = arg_value
else:
variable_files.append(arg_value)
return fixed_files, variable_files | 0.003871 |
def do_download_datafiles(self, _):
''' Download datafiles. '''
contents = {"trialdata": lambda p: p.get_trial_data(), "eventdata": \
lambda p: p.get_event_data(), "questiondata": lambda p: \
p.get_question_data()}
query = Participant.query.all()
for k in contents:
ret = "".join([contents[k](p) for p in query])
temp_file = open(k + '.csv', 'w')
temp_file.write(ret)
temp_file.close() | 0.007921 |
def do_chan_log_all(self, line):
"""Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all"""
self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS))
print('Channel log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS)) | 0.016502 |
def mequg(m1, nr, nc):
"""
Set one double precision matrix of arbitrary size equal to another.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mequg_c.html
:param m1: Input matrix.
:type m1: NxM-Element Array of floats
:param nr: Row dimension of m1.
:type nr: int
:param nc: Column dimension of m1.
:type nc: int
:return: Output matrix equal to m1
:rtype: NxM-Element Array of floats
"""
m1 = stypes.toDoubleMatrix(m1)
mout = stypes.emptyDoubleMatrix(x=nc, y=nr)
nc = ctypes.c_int(nc)
nr = ctypes.c_int(nr)
libspice.mequg_c(m1, nc, nr, mout)
return stypes.cMatrixToNumpy(mout) | 0.001517 |
def interpreter(self):
"""
Launch an AML interpreter session for testing
"""
while True:
message = input('[#] ')
if message.lower().strip() == 'exit':
break
reply = self.get_reply('#interpreter#', message)
if not reply:
print('No reply received.', end='\n\n')
continue
# typewrite(reply, end='\n\n') TODO
print(reply, end='\n\n') | 0.004141 |
def _build_dictionary(self, models):
"""
Build a dictionary with the models.
:param models: The models
:type models: Collection
"""
for model in models:
key = getattr(model, self._morph_type, None)
if key:
foreign = getattr(model, self._foreign_key)
if key not in self._dictionary:
self._dictionary[key] = {}
if foreign not in self._dictionary[key]:
self._dictionary[key][foreign] = []
self._dictionary[key][foreign].append(model) | 0.003284 |
def project_ranges(cb, msg, attributes):
"""
Projects ranges supplied by a callback.
"""
if skip(cb, msg, attributes):
return msg
plot = get_cb_plot(cb)
x0, x1 = msg.get('x_range', (0, 1000))
y0, y1 = msg.get('y_range', (0, 1000))
extents = x0, y0, x1, y1
x0, y0, x1, y1 = project_extents(extents, plot.projection,
plot.current_frame.crs)
coords = {'x_range': (x0, x1), 'y_range': (y0, y1)}
return {k: v for k, v in coords.items() if k in attributes} | 0.001852 |
def get_file(self,
target_path,
host_path,
note=None,
loglevel=logging.DEBUG):
"""Copy a file from the target machine to the host machine
@param target_path: path to file in the target
@param host_path: path to file on the host machine (e.g. copy test)
@param note: See send()
@type target_path: string
@type host_path: string
@return: boolean
        @rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
self.handle_note(note)
# Only handle for docker initially, return false in case we care
if self.build['delivery'] != 'docker':
return False
# on the host, run:
#Usage: docker cp [OPTIONS] CONTAINER:PATH LOCALPATH|-
# Need: host env, container id, path from and path to
shutit_pexpect_child = self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = self.expect_prompts['ORIGIN_ENV']
self.send('docker cp ' + self.target['container_id'] + ':' + target_path + ' ' + host_path,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
check_exit=False,
echo=False,
loglevel=loglevel)
self.handle_note_after(note=note)
return True | 0.040316 |
def get_bounds(self):
"""
Get the parameters of bounding box of the UI element.
Returns:
:obj:`list` <:obj:`float`>: 4-list (top, right, bottom, left) coordinates related to the edge of screen in
NormalizedCoordinate system
"""
size = self.get_size()
top_left = self.get_position([0, 0])
# t, r, b, l
bounds = [top_left[1], top_left[0] + size[0], top_left[1] + size[1], top_left[0]]
return bounds | 0.008081 |
def fix_dashes(string):
"""Fix bad Unicode special dashes in string."""
string = string.replace(u'\u05BE', '-')
string = string.replace(u'\u1806', '-')
string = string.replace(u'\u2E3A', '-')
string = string.replace(u'\u2E3B', '-')
string = unidecode(string)
return re.sub(r'--+', '-', string) | 0.003115 |
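A quick illustration of fix_dashes, assuming the `re` and `unidecode` imports implied by the function body.

# u'\u05BE' is one of the special dashes handled above; runs of hyphens collapse to one.
assert fix_dashes(u'A\u05BE\u05BEB') == 'A-B'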
def get_pythonpath(self, at_start=False):
"""Get project path as a list to be added to PYTHONPATH"""
if at_start:
current_path = self.get_option('current_project_path',
default=None)
else:
current_path = self.get_active_project_path()
if current_path is None:
return []
else:
return [current_path] | 0.004566 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id
if hasattr(self,
'configuration_id') and self.configuration_id is not None:
_dict['configuration_id'] = self.configuration_id
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(
self,
'status_description') and self.status_description is not None:
_dict['status_description'] = self.status_description
if hasattr(self, 'filename') and self.filename is not None:
_dict['filename'] = self.filename
if hasattr(self, 'file_type') and self.file_type is not None:
_dict['file_type'] = self.file_type
if hasattr(self, 'sha1') and self.sha1 is not None:
_dict['sha1'] = self.sha1
if hasattr(self, 'notices') and self.notices is not None:
_dict['notices'] = [x._to_dict() for x in self.notices]
return _dict | 0.001708 |
def spectrogram(source, nfft=512, overlap=90, window='hanning', caldb=93, calv=2.83):
"""
Produce a matrix of spectral intensity, uses matplotlib's specgram
function. Output is in dB scale.
:param source: filename of audiofile, or samplerate and vector of audio signal
:type source: str or (int, numpy.ndarray)
:param nfft: size of nfft window to use
:type nfft: int
:param overlap: percent overlap of window
:type overlap: number
:param window: Type of window to use, choices are hanning, hamming, blackman, bartlett or none (rectangular)
:type window: string
:returns: spec -- 2D array of intensities, freqs -- yaxis labels, bins -- time bin labels, duration -- duration of signal
"""
if isinstance(source, basestring):
fs, wavdata = audioread(source)
else:
fs, wavdata = source
    # truncate to nearest ms
duration = float(len(wavdata)) / fs
desired_npts = int((np.trunc(duration * 1000) / 1000) * fs)
# print 'LENGTH {}, DESIRED {}'.format(len(wavdata), desired_npts)
wavdata = wavdata[:desired_npts]
duration = len(wavdata) / fs
if VERBOSE:
amp = rms(wavdata, fs)
print 'RMS of input signal to spectrogram', amp
# normalize
if len(wavdata) > 0 and np.max(abs(wavdata)) != 0:
wavdata = wavdata / np.max(abs(wavdata))
if window == 'hanning':
winfnc = mlab.window_hanning
elif window == 'hamming':
winfnc = np.hamming(nfft)
elif window == 'blackman':
winfnc = np.blackman(nfft)
elif window == 'bartlett':
winfnc = np.bartlett(nfft)
elif window == None or window == 'none':
winfnc = mlab.window_none
noverlap = int(nfft * (float(overlap) / 100))
Pxx, freqs, bins = mlab.specgram(wavdata, NFFT=nfft, Fs=fs, noverlap=noverlap,
pad_to=nfft * 2, window=winfnc, detrend=mlab.detrend_none,
sides='default', scale_by_freq=False)
# log of zero is -inf, which is not great for plotting
Pxx[Pxx == 0] = np.nan
# convert to db scale for display
spec = 20. * np.log10(Pxx)
    # set 0 to minimum value in spec?
# would be great to have spec in db SPL, and set any -inf to 0
spec[np.isnan(spec)] = np.nanmin(spec)
return spec, freqs, bins, duration | 0.003406 |
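A minimal usage sketch for spectrogram; the synthetic tone and plotting calls are illustrative and assume matplotlib is available.

import numpy as np
import matplotlib.pyplot as plt

fs = 44100
tone = np.sin(2 * np.pi * 1000 * np.arange(fs) / fs)    # one second of a 1 kHz tone
spec, freqs, bins, duration = spectrogram((fs, tone), nfft=512, overlap=90)
plt.pcolormesh(bins, freqs, spec)                        # spec is already in dB
plt.xlabel('time (s)')
plt.ylabel('frequency (Hz)')
plt.show()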
def submit(recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip,
use_ssl=False):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field
from the form
recaptcha_response_field -- The value of recaptcha_response_field
from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and recaptcha_challenge_field and
len(recaptcha_response_field) and len(recaptcha_challenge_field)):
return RecaptchaResponse(
is_valid=False,
error_code='incorrect-captcha-sol'
)
if getattr(settings, "NOCAPTCHA", False):
params = urlencode({
'secret': want_bytes(private_key),
'response': want_bytes(recaptcha_response_field),
'remoteip': want_bytes(remoteip),
})
else:
params = urlencode({
'privatekey': want_bytes(private_key),
'remoteip': want_bytes(remoteip),
'challenge': want_bytes(recaptcha_challenge_field),
'response': want_bytes(recaptcha_response_field),
})
if not PY2:
params = params.encode('utf-8')
if use_ssl:
verify_url = 'https://%s/recaptcha/api/verify' % VERIFY_SERVER
else:
verify_url = 'http://%s/recaptcha/api/verify' % VERIFY_SERVER
if getattr(settings, "NOCAPTCHA", False):
verify_url = 'https://%s/recaptcha/api/siteverify' % VERIFY_SERVER
req = Request(
url=verify_url,
data=params,
headers={
'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'reCAPTCHA Python'
}
)
httpresp = urlopen(req)
if getattr(settings, "NOCAPTCHA", False):
data = json.loads(httpresp.read().decode('utf-8'))
return_code = data['success']
return_values = [return_code, None]
if return_code:
return_code = 'true'
else:
return_code = 'false'
else:
return_values = httpresp.read().splitlines()
return_code = return_values[0]
httpresp.close()
if (return_code == "true"):
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(is_valid=False, error_code=return_values[1]) | 0.00284 |
def module2upstream(mod):
"""Return a corresponding OpenStack upstream name for a python module.
mod -- python module name
"""
for rule in OPENSTACK_UPSTREAM_PKG_MAP:
pkglist = rule(mod, dist=None)
if pkglist:
return pkglist[0]
return mod | 0.003472 |
def source_path(cls, organization, source):
"""Return a fully-qualified source string."""
return google.api_core.path_template.expand(
"organizations/{organization}/sources/{source}",
organization=organization,
source=source,
) | 0.006969 |
def frame_update_count(self):
"""
The number of frames before this Layout should be updated.
"""
result = 1000000
for column in self._columns:
for widget in column:
if widget.frame_update_count > 0:
result = min(result, widget.frame_update_count)
return result | 0.005618 |
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(_linkcode_resolve, revision=revision, package=package,
url_fmt=url_fmt) | 0.001757 |
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
    :class:`matplotlib.axes.Axes`
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax | 0.000324 |
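A short usage sketch; the CSV path and class-column name are placeholders for any DataFrame that has a class-label column.

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('iris.csv')              # hypothetical file with a 'Name' class column
ax = andrews_curves(df, 'Name', samples=100)
plt.show()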
def truncate_graph_bbox(G, north, south, east, west, truncate_by_edge=False, retain_all=False):
"""
Remove every node in graph that falls outside a bounding box.
Needed because overpass returns entire ways that also include nodes outside
the bbox if the way (that is, a way with a single OSM ID) has a node inside
the bbox at some point.
Parameters
----------
G : networkx multidigraph
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
truncate_by_edge : bool
        if True, retain a node if it's outside the bbox but at least one of
        its neighbors is within the bbox
retain_all : bool
if True, return the entire graph even if it is not connected
Returns
-------
networkx multidigraph
"""
start_time = time.time()
G = G.copy()
nodes_outside_bbox = []
for node, data in G.nodes(data=True):
if data['y'] > north or data['y'] < south or data['x'] > east or data['x'] < west:
# this node is outside the bounding box
if not truncate_by_edge:
# if we're not truncating by edge, add node to list of nodes
# outside the bounding box
nodes_outside_bbox.append(node)
else:
# if we're truncating by edge, see if any of node's neighbors
# are within bounding box
any_neighbors_in_bbox = False
neighbors = list(G.successors(node)) + list(G.predecessors(node))
for neighbor in neighbors:
x = G.nodes[neighbor]['x']
y = G.nodes[neighbor]['y']
if y < north and y > south and x < east and x > west:
any_neighbors_in_bbox = True
break
# if none of its neighbors are within the bounding box, add node
# to list of nodes outside the bounding box
if not any_neighbors_in_bbox:
nodes_outside_bbox.append(node)
G.remove_nodes_from(nodes_outside_bbox)
log('Truncated graph by bounding box in {:,.2f} seconds'.format(time.time()-start_time))
    # remove any isolated nodes and retain only the largest component, unless
    # retain_all is True
if not retain_all:
G = remove_isolated_nodes(G)
G = get_largest_component(G)
return G | 0.002343 |
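Usage sketch, assuming an OSMnx-style multidigraph G whose nodes carry 'x'/'y' attributes as the function expects; the coordinates are arbitrary examples.

G_small = truncate_graph_bbox(G, north=37.79, south=37.78,
                              east=-122.41, west=-122.43,
                              truncate_by_edge=True)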
def get_layout_as_string(layout):
"""
Take a dict or string and return a string.
The dict will be json dumped.
The string will json parsed to check for json validity. In order to deal
with strings which have been json encoded multiple times, keep json decoding
until a dict is retrieved or until a non-json structure is identified.
"""
if isinstance(layout, dict):
return json.dumps(layout)
    if isinstance(layout, six.string_types):
try:
return get_layout_as_string(json.loads(layout))
except:
return layout | 0.006525 |
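A small example of the double-encoded case the recursion above handles (sketch; relies on the same `json` import used by the function).

layout = {"rows": 2}
once = json.dumps(layout)      # '{"rows": 2}'
twice = json.dumps(once)       # the same payload JSON-encoded a second time
assert get_layout_as_string(twice) == once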
def _postloop_hook(self) -> None:
""" Stops the alerter thread """
# After this function returns, cmdloop() releases self.terminal_lock which could make the alerter
# thread think the prompt is on screen. Therefore this is the best place to stop the alerter thread.
# You can also stop it via a command. See do_stop_alerts().
self._stop_thread = True
if self._alerter_thread.is_alive():
self._alerter_thread.join() | 0.008421 |
def get_database_columns(self, tables=None, database=None):
"""Retrieve a dictionary of columns."""
# Get table data and columns from source database
source = database if database else self.database
tables = tables if tables else self.tables
return {tbl: self.get_columns(tbl) for tbl in tqdm(tables, total=len(tables),
desc='Getting {0} columns'.format(source))} | 0.008658 |
def reftrack_alien_data(rt, role):
"""Return the data for the alien status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the alien status
:rtype: depending on role
:raises: None
"""
alien = rt.alien()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if alien:
return "Yes"
else:
return "No" | 0.001838 |
def needs_to_be_resolved(parent_obj, attr_name):
"""
This function determines, if a reference (CrossReference) needs to be
resolved or not (while creating the model, while resolving references).
Args:
parent_obj: the object containing the attribute to be resolved.
attr_name: the attribute identification object.
Returns:
True if the attribute needs to be resolved. Else False.
In case of lists of references, this function return true if any of the
references in the list needs to be resolved.
Note: outside the model building process (from_file or from_str) this
function always returns False.
"""
if hasattr(get_model(parent_obj), "_tx_reference_resolver"):
return get_model(parent_obj)._tx_reference_resolver. \
has_unresolved_crossrefs(parent_obj, attr_name)
else:
return False | 0.00111 |
def send_http_request_with_json(context, method):
"""
Parameters:
.. code-block:: json
{
"param1": "value1",
"param2": "value2",
"param3": {
"param31": "value31"
}
}
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = json.loads(context.text)
context.http_request_context.renderer = JSONRenderer()
send_http_request(context, method) | 0.001887 |
def main(argv: Optional[Sequence[str]] = None) -> None:
"""Parse arguments and process the homework assignment."""
parser = ArgumentParser(description="Convert Jupyter Notebook assignments to PDFs")
parser.add_argument(
"--hw",
type=int,
required=True,
help="Homework number to convert",
dest="hw_num",
)
parser.add_argument(
"-p",
"--problems",
type=int,
help="Problem numbers to convert",
dest="problems",
nargs="*",
)
parser.add_argument(
"--by-hand",
type=int,
help="Problem numbers to be completed by hand",
dest="by_hand",
nargs="*",
)
args = parser.parse_args(argv)
prefix = Path(f"homework/homework-{args.hw_num}")
process(args.hw_num, args.problems, prefix=prefix, by_hand=args.by_hand) | 0.002307 |
def result(self, psd_state):
"""Return freqs and averaged PSD for given center frequency"""
freq_array = numpy.fft.fftshift(psd_state['freq_array'])
pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
if self._crop_factor:
crop_bins_half = round((self._crop_factor * self._bins) / 2)
freq_array = freq_array[crop_bins_half:-crop_bins_half]
pwr_array = pwr_array[crop_bins_half:-crop_bins_half]
if psd_state['repeats'] > 1:
pwr_array = pwr_array / psd_state['repeats']
if self._log_scale:
pwr_array = 10 * numpy.log10(pwr_array)
return (freq_array, pwr_array) | 0.002937 |
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars-2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars-len(sig)-4-2)
return u"(%s)" % sig | 0.000925 |
def _parse(self, command):
""" Parse a single command.
"""
cmd, id_, args = command[0], command[1], command[2:]
if cmd == 'CURRENT':
# This context is made current
self.env.clear()
self._gl_initialize()
self.env['fbo'] = args[0]
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, args[0])
elif cmd == 'FUNC':
# GL function call
args = [as_enum(a) for a in args]
try:
getattr(gl, id_)(*args)
except AttributeError:
logger.warning('Invalid gl command: %r' % id_)
elif cmd == 'CREATE':
# Creating an object
if args[0] is not None:
klass = self._classmap[args[0]]
self._objects[id_] = klass(self, id_)
else:
self._invalid_objects.add(id_)
elif cmd == 'DELETE':
# Deleting an object
ob = self._objects.get(id_, None)
if ob is not None:
self._objects[id_] = JUST_DELETED
ob.delete()
else:
            # Doing something to an object
ob = self._objects.get(id_, None)
if ob == JUST_DELETED:
return
if ob is None:
if id_ not in self._invalid_objects:
raise RuntimeError('Cannot %s object %i because it '
'does not exist' % (cmd, id_))
return
# Triage over command. Order of commands is set so most
# common ones occur first.
if cmd == 'DRAW': # Program
ob.draw(*args)
elif cmd == 'TEXTURE': # Program
ob.set_texture(*args)
elif cmd == 'UNIFORM': # Program
ob.set_uniform(*args)
elif cmd == 'ATTRIBUTE': # Program
ob.set_attribute(*args)
elif cmd == 'DATA': # VertexBuffer, IndexBuffer, Texture
ob.set_data(*args)
elif cmd == 'SIZE': # VertexBuffer, IndexBuffer,
ob.set_size(*args) # Texture[1D, 2D, 3D], RenderBuffer
elif cmd == 'ATTACH': # FrameBuffer
ob.attach(*args)
elif cmd == 'FRAMEBUFFER': # FrameBuffer
ob.set_framebuffer(*args)
elif cmd == 'SHADERS': # Program
ob.set_shaders(*args)
elif cmd == 'WRAPPING': # Texture1D, Texture2D, Texture3D
ob.set_wrapping(*args)
elif cmd == 'INTERPOLATION': # Texture1D, Texture2D, Texture3D
ob.set_interpolation(*args)
else:
logger.warning('Invalid GLIR command %r' % cmd) | 0.000723 |
def wait_for_task(task_data, task_uri='/tasks'):
"""Run task and check the result.
Args:
task_data (str): the task json to execute
Returns:
str: Task status.
"""
taskid = post_task(task_data, task_uri)
if isinstance(task_data, str):
json_data = json.loads(task_data)
else:
json_data = task_data
# inspect the task to see if a timeout is configured
job = json_data['job'][0]
env = job.get('credentials')
task_type = job.get('type')
timeout = TASK_TIMEOUTS.get(env, dict()).get(task_type, DEFAULT_TASK_TIMEOUT)
LOG.debug("Task %s will timeout after %s", task_type, timeout)
return check_task(taskid, timeout) | 0.002849 |
def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):
"""
Return list of all dirs and files inside given dir.
Also can filter contents to return only dirs or files.
Args:
- dir_name: Which directory we need to scan (relative)
- get_dirs: Return dirs list
- get_files: Return files list
- hide_ignored: Exclude files and dirs with initial underscore
"""
if get_dirs is None and get_files is None:
get_dirs = True
get_files = True
source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)
dirs = []
for dir_or_file_name in os.listdir(source_dir):
path = os.path.join(source_dir, dir_or_file_name)
if hide_ignored and dir_or_file_name.startswith('_'):
continue
is_dir = os.path.isdir(path)
if get_dirs and is_dir or get_files and not is_dir:
dirs.append(dir_or_file_name)
return dirs | 0.001066 |
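Hypothetical usage, assuming a Django-style settings.BASE_DIR with an 'app/templates' directory; the directory name is illustrative.

template_dirs = listdir('templates', get_dirs=True, hide_ignored=True)   # sub-directories only
all_entries = listdir('templates')                                        # dirs and files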
def make_purge_data(parser):
"""
Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to purge Ceph data from',
)
parser.set_defaults(
func=purgedata,
) | 0.003125 |
def to_file(self, output_filename):
        '''Handles pdf and epub formats.
        Input: output_filename should have the proper extension.
        Output: The name of the file created, or an IOError if failed'''
temp_file = NamedTemporaryFile(mode="w", suffix=".md", delete=False)
temp_file.write(self._content)
temp_file.close()
subprocess_arguments = [PANDOC_PATH, temp_file.name, '-o %s' % output_filename]
subprocess_arguments.extend(self.arguments)
cmd = " ".join(subprocess_arguments)
fin = os.popen(cmd)
msg = fin.read()
fin.close()
if msg:
print("Pandoc message: {}",format(msg))
os.remove(temp_file.name)
if exists(output_filename):
return output_filename
else:
raise IOError("Failed creating file: %s" % output_filename) | 0.004561 |
def is_opus_maximum(self):
"""Check whether the work is the author's opus maximum.
Two cases:
1. the work is flagged as opus max
2. there is only one work by this author
:return: boolean
"""
opmax = self._get_opus_maximum()
types = self.ecrm_P2_has_type
if opmax in types:
return True
else:
if len(self.author.get_works()) == 1:
return True
else:
return False | 0.003922 |
def create(self, create_info=None, hyperparameter=None, server='local', insights=False):
"""
Creates a new job in git and pushes it.
:param create_info: from the api.create_job_info(id). Contains the config and job info (type, server)
:param hyperparameter: simple nested dict with key->value, which overwrites stuff from aetros.yml
        :param server: if None, the job will be assigned to a server.
:param insights: whether you want to activate insights (for simple models)
"""
if not create_info:
create_info = {
'server': server,
'config': {
'insights': insights,
'command': ' '.join(sys.argv)
}
}
config = find_config(self.config_path, logger=self.logger)
if not config['model']:
raise Exception('AETROS config file (aetros.yml) not found.')
# first transform simple format in the full definition with parameter types
# (string, number, group, choice_group, etc)
full_hyperparameters = lose_parameters_to_full(config['parameters'])
# now extract hyperparameters from full definition, and overwrite stuff using
# incoming_hyperparameter if available
hyperparameter = extract_parameters(full_hyperparameters, hyperparameter)
create_info['config']['parameters'] = hyperparameter
self.job = create_info
if 'server' not in self.job and server:
# setting this disables server assignment
self.job['server'] = server
self.job['optimization'] = None
self.job['type'] = 'custom'
if 'parameters' not in self.job['config']:
self.job['config']['parameters'] = {}
if 'insights' not in self.job['config']:
self.job['config']['insights'] = insights
self.job['created'] = time.time()
self.git.create_job_id(self.job)
self.logger.debug("Job created with Git ref " + self.git.ref_head)
return self.job_id | 0.004699 |
def get_environmental_configuration(self):
"""
Gets the settings that describe the environmental configuration (supported feature set, calibrated minimum &
maximum power, location & dimensions, ...) of the enclosure resource.
Returns:
Settings that describe the environmental configuration.
"""
uri = '{}/environmentalConfiguration'.format(self.data['uri'])
return self._helper.do_get(uri) | 0.00655 |
def get_causal_central_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]:
"""Return a set of all nodes that have both an in-degree > 0 and out-degree > 0.
This means that they are an integral part of a pathway, since they are both produced and consumed.
"""
return {
node
for node in graph
if node.function == func and is_causal_central(graph, node)
} | 0.007519 |
def simulate(self):
"""Generates a random integer in the available range."""
min_ = (-sys.maxsize - 1) if self._min is None else self._min
max_ = sys.maxsize if self._max is None else self._max
return random.randint(min_, max_) | 0.007722 |
def _influxdb_url(self):
""" Return REST API URL to access time series.
"""
url = "{0}/db/{1}/series".format(self.influxdb.url.rstrip('/'), self.config.dbname)
if self.influxdb.user and self.influxdb.password:
url += "?u={0}&p={1}".format(self.influxdb.user, self.influxdb.password)
return url | 0.011527 |
def get_readme():
"""Generate long description"""
pandoc = None
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
pandoc = os.path.join(path, 'pandoc')
if os.path.isfile(pandoc) and os.access(pandoc, os.X_OK):
break
else:
pandoc = None
try:
if pandoc:
cmd = [pandoc, '-t', 'rst', 'README.md']
long_description = os.popen(' '.join(cmd)).read()
else:
raise ValueError
except BaseException:
long_description = open("README.md").read()
return long_description | 0.001618 |
def get(self):
"""
Returns the next connection in this pool that is ready to be
        reused. Returns None if there aren't any.
"""
# Discard ready connections that are too old.
self.clean()
# Return the first connection that is ready, and remove it
# from the queue. Connections that aren't ready are returned
# to the end of the queue with an updated time, on the
# assumption that somebody is actively reading the response.
for _ in range(len(self.queue)):
(conn, _) = self.queue.pop(0)
if self._conn_ready(conn):
return conn
else:
self.put(conn)
return None | 0.00277 |
def get_namespace_preorder_burn_info( outputs ):
"""
Given the set of outputs, find the fee sent
to our burn address.
Return the fee and burn address on success as {'op_fee': ..., 'burn_address': ...}
Return None if not found
"""
if len(outputs) < 3:
# not a well-formed preorder
return None
op_fee = outputs[2]['value']
burn_address = None
try:
burn_address = virtualchain.script_hex_to_address(outputs[2]['script'])
assert burn_address
except:
log.warning("Invalid burn script: {}".format(outputs[2]['script']))
return None
return {'op_fee': op_fee, 'burn_address': burn_address} | 0.014472 |
def _set_sub_prop(container, keys, value):
"""Set a nested value in a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to set the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
value (object): Value to set within the container.
Examples:
Set a top-level value (equivalent to ``container['key'] = 'value'``).
>>> container = {}
>>> _set_sub_prop(container, ['key'], 'value')
>>> container
{'key': 'value'}
Set a nested value.
>>> container = {}
>>> _set_sub_prop(container, ['key', 'subkey'], 'value')
>>> container
{'key': {'subkey': 'value'}}
Replace a nested value.
>>> container = {'key': {'subkey': 'prev'}}
>>> _set_sub_prop(container, ['key', 'subkey'], 'new')
>>> container
{'key': {'subkey': 'new'}}
"""
sub_val = container
for key in keys[:-1]:
if key not in sub_val:
sub_val[key] = {}
sub_val = sub_val[key]
sub_val[keys[-1]] = value | 0.000734 |
def create_token(self, obj_id, extra_data):
"""Create a token referencing the object id with extra data.
Note random data is added to ensure that no two tokens are identical.
"""
return self.dumps(
dict(
id=obj_id,
data=extra_data,
rnd=binascii.hexlify(os.urandom(4)).decode('utf-8')
)
) | 0.005 |
def case(self, case_id=None, institute_id=None, display_name=None):
"""Fetches a single case from database
Use either the _id or combination of institute_id and display_name
Args:
            case_id(str): _id for a case
institute_id(str):
display_name(str)
Yields:
A single Case
"""
query = {}
if case_id:
query['_id'] = case_id
LOG.info("Fetching case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Fetching case %s institute %s", display_name, institute_id)
query['owner'] = institute_id
query['display_name'] = display_name
return self.case_collection.find_one(query) | 0.004624 |
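Usage sketch; `adapter` stands for a configured instance of this class and the identifiers are examples, not values from the original project.

case_obj = adapter.case(case_id='internal_id_1')
case_obj = adapter.case(institute_id='cust000', display_name='643594')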
def __insert_data(self):
"""!
@brief Inserts input data to the tree.
        @remark If the maximum number of entries is exceeded, the diameter is increased and the tree is rebuilt.
"""
        for index_point in range(len(self.__pointer_data)):
            point = self.__pointer_data[index_point]
            self.__tree.insert_cluster([point])
            if self.__tree.amount_entries > self.__entry_size_limit:
self.__tree = self.__rebuild_tree(index_point); | 0.024605 |
def inverse(self, encoded, duration=None):
'''Inverse static tag transformation'''
ann = jams.Annotation(namespace=self.namespace, duration=duration)
if np.isrealobj(encoded):
detected = (encoded >= 0.5)
else:
detected = encoded
for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]:
vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd)))
ann.append(time=0,
duration=duration,
value=vd,
confidence=encoded[vid])
return ann | 0.003257 |
def get(self, sid):
"""
Constructs a BindingContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.notify.v1.service.binding.BindingContext
:rtype: twilio.rest.notify.v1.service.binding.BindingContext
"""
return BindingContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | 0.007752 |
def _ExtractPathSpecsFromFileSystem(
self, path_spec, find_specs=None, recurse_file_system=True,
resolver_context=None):
"""Extracts path specification from a file system within a specific source.
Args:
path_spec (dfvfs.PathSpec): path specification of the root of
the file system.
find_specs (Optional[list[dfvfs.FindSpec]]): find specifications.
recurse_file_system (Optional[bool]): True if extraction should
recurse into a file system.
resolver_context (Optional[dfvfs.Context]): resolver context.
Yields:
dfvfs.PathSpec: path specification of a file entry found in
the file system.
"""
try:
file_system = path_spec_resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=resolver_context)
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.error(
'Unable to open file system with error: {0!s}'.format(exception))
return
try:
if find_specs:
searcher = file_system_searcher.FileSystemSearcher(
file_system, path_spec)
for extracted_path_spec in searcher.Find(find_specs=find_specs):
yield extracted_path_spec
elif recurse_file_system:
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
if file_entry:
for extracted_path_spec in self._ExtractPathSpecsFromDirectory(
file_entry):
yield extracted_path_spec
else:
yield path_spec
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception))
finally:
file_system.Close() | 0.007821 |
def hinit(func, x, t, pos_neg, f0, iord, hmax, rtol, atol, args):
"""
Estimate initial step size
"""
sk = atol + rtol * np.fabs(x)
dnf = np.sum(np.square(f0 / sk), axis=0)
dny = np.sum(np.square(x / sk), axis=0)
h = np.sqrt(dny / dnf) * 0.01
h = np.min([h, np.fabs(hmax)])
h = custom_sign(h, pos_neg)
# perform an explicit Euler step
xx1 = x + h * f0
f1 = np.array(func(xx1, t[0] + h, *args))
# estimate the second derivative of the solution
der2 = np.sum(np.square((f1 - f0) / sk), axis=0)
der2 = np.sqrt(der2) / h
# step size is computed such that h ** iord * max_d(norm(f0), norm(der2)) = 0.01
der12 = np.max([np.fabs(der2), np.sqrt(dnf)])
h1 = np.power(0.01 / der12, 1.0 / iord)
h = np.min([100.0 * np.fabs(h), np.min([np.fabs(h1), np.fabs(hmax)])])
return custom_sign(h, pos_neg), f0, f1, xx1 | 0.002262 |
def tag(check, version, push, dry_run):
"""Tag the HEAD of the git repo with the current release number for a
specific check. The tag is pushed to origin by default.
You can tag everything at once by setting the check to `all`.
Notice: specifying a different version than the one in __about__.py is
a maintenance task that should be run under very specific circumstances
(e.g. re-align an old release performed on the wrong commit).
"""
tagging_all = check == 'all'
valid_checks = get_valid_checks()
if not tagging_all and check not in valid_checks:
abort('Check `{}` is not an Agent-based Integration'.format(check))
if tagging_all:
if version:
abort('You cannot tag every check with the same version')
checks = sorted(valid_checks)
else:
checks = [check]
# Check for any new tags
tagged = False
for check in checks:
echo_info('{}:'.format(check))
# get the current version
if not version:
version = get_version_string(check)
# get the tag name
release_tag = get_release_tag_string(check, version)
echo_waiting('Tagging HEAD with {}... '.format(release_tag), indent=True, nl=False)
if dry_run:
version = None
click.echo()
continue
result = git_tag(release_tag, push)
if result.code == 128 or 'already exists' in result.stderr:
echo_warning('already exists')
elif result.code != 0:
abort('\n{}{}'.format(result.stdout, result.stderr), code=result.code)
else:
tagged = True
echo_success('success!')
# Reset version
version = None
if not tagged:
abort(code=2) | 0.00168 |
def sibling(self, name: InstanceName) -> "ObjectMember":
"""Return an instance node corresponding to a sibling member.
Args:
name: Instance name of the sibling member.
Raises:
NonexistentSchemaNode: If member `name` is not permitted by the
schema.
NonexistentInstance: If sibling member `name` doesn't exist.
"""
ssn = self.parinst._member_schema_node(name)
try:
sibs = self.siblings.copy()
newval = sibs.pop(name)
sibs[self.name] = self.value
return ObjectMember(name, sibs, newval, self.parinst,
ssn, self.timestamp)
except KeyError:
raise NonexistentInstance(self.json_pointer(),
f"member '{name}'") from None | 0.002347 |
def process_response(self, request, response):
"""Commits and leaves transaction management."""
if tldap.transaction.is_managed():
tldap.transaction.commit()
tldap.transaction.leave_transaction_management()
return response | 0.007407 |
def get_data(self):
"""
attempt to read measurements file in working directory.
"""
meas_file = os.path.join(self.WD, 'magic_measurements.txt')
if not os.path.isfile(meas_file):
print("-I- No magic_measurements.txt file")
return {}
try:
meas_data, file_type = pmag.magic_read(meas_file)
except IOError:
print("-I- No magic_measurements.txt file")
return {}
if file_type == 'bad_file':
print("-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.")
old_specimen_name = ''
#start_time = time.time()
meas_name_list = [measurement.name for measurement in self.measurements]
for rec in meas_data:
# get citation information
citation = rec.get('er_citation_names', 'This study')
if 'This study' not in citation:
citation = citation.strip() + ':This study'
er_data = {'er_citation_names': citation}
pmag_data = {'er_citation_names': 'This study'}
specimen_name = rec["er_specimen_name"]
# ignore measurement if there is no specimen
if specimen_name == "" or specimen_name == " ":
continue
# if we've moved onto a new specimen, make sure a sample/site/location
# exists for that specimen
if specimen_name != old_specimen_name:
sample_name = rec["er_sample_name"]
site_name = rec["er_site_name"]
location_name = rec["er_location_name"]
# add items and parents
location = self.find_by_name(location_name, self.locations)
if location_name and not location:
location = self.add_location(location_name, er_data=er_data,
pmag_data=pmag_data)
site = self.find_by_name(site_name, self.sites)
if site_name and not site:
site = self.add_site(site_name, location_name,
er_data, pmag_data)
sample = self.find_by_name(sample_name, self.samples)
if sample_name and not sample:
sample = self.add_sample(sample_name, site_name,
er_data, pmag_data)
specimen = self.find_by_name(specimen_name, self.specimens)
if specimen_name and not specimen:
specimen = self.add_specimen(specimen_name, sample_name,
er_data, pmag_data)
# add child_items
if sample and not self.find_by_name(specimen_name, sample.specimens):
sample.specimens.append(specimen)
if site and not self.find_by_name(sample_name, site.samples):
site.samples.append(sample)
if location and not self.find_by_name(site_name, location.sites):
location.sites.append(site)
exp_name = rec['magic_experiment_name']
meas_num = rec['measurement_number']
meas_name = exp_name + '_' + str(meas_num)
measurement = self.find_by_name(meas_name, self.measurements, meas_name_list)
if not measurement:
self.add_measurement(exp_name, meas_num, specimen.name, rec)
meas_name_list.append(meas_name)
old_specimen_name = specimen_name | 0.002789 |
def assignrepr(self, prefix: str) -> str:
"""Return a |repr| string with a prefixed assignment."""
with objecttools.repr_.preserve_strings(True):
with hydpy.pub.options.ellipsis(2, optional=True):
with objecttools.assignrepr_tuple.always_bracketed(False):
classname = objecttools.classname(self)
blanks = ' ' * (len(prefix+classname) + 1)
nodestr = objecttools.assignrepr_tuple(
self.nodes.names, blanks+'nodes=', 70)
elementstr = objecttools.assignrepr_tuple(
self.elements.names, blanks + 'elements=', 70)
return (f'{prefix}{classname}("{self.name}",\n'
f'{nodestr},\n'
f'{elementstr})') | 0.002389 |
def _retrieve_and_validate_certificate_chain(self, cert_url):
# type: (str) -> Certificate
"""Retrieve and validate certificate chain.
This method validates if the URL is valid and loads and
validates the certificate chain, before returning it.
:param cert_url: URL for retrieving certificate chain
:type cert_url: str
:return The certificate chain loaded from the URL
:rtype cryptography.x509.Certificate
:raises: :py:class:`VerificationException` if the URL is invalid,
if the loaded certificate chain is invalid
"""
self._validate_certificate_url(cert_url)
cert_chain = self._load_cert_chain(cert_url)
self._validate_cert_chain(cert_chain)
return cert_chain | 0.003812 |
def _get_bandgap_doscar(filename):
"""Get the bandgap from the DOSCAR file"""
with open(filename) as fp:
for i in range(6):
l = fp.readline()
efermi = float(l.split()[3])
step1 = fp.readline().split()[0]
step2 = fp.readline().split()[0]
step_size = float(step2)-float(step1)
not_found = True
while not_found:
l = fp.readline().split()
e = float(l.pop(0))
dens = 0.0
for i in range(int(len(l)/2)):
dens += float(l[i])
if e < efermi and dens > 1e-3:
bot = e
elif e > efermi and dens > 1e-3:
top = e
not_found = False
if top - bot < step_size*2:
bandgap = 0.0
else:
bandgap = float(top - bot)
return bandgap | 0.004162 |
def histogram(self, tag, values, bins, step=None):
"""Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step
"""
if step is None:
step = self._step
else:
self._step = step
values = onp.array(values)
bins = onp.array(bins)
values = onp.reshape(values, -1)
counts, limits = onp.histogram(values, bins=bins)
# boundary logic
cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
start, end = onp.searchsorted(
cum_counts, [0, cum_counts[-1] - 1], side='right')
start, end = int(start), int(end) + 1
counts = (
counts[start -
1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
limits = limits[start:end + 1]
sum_sq = values.dot(values)
histo = HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist())
summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
self.add_summary(summary, step) | 0.002358 |
def get_config_groups(self, groups_conf, groups_pillar_name):
'''
get info from groups in config, and from the named pillar
todo: add specification for the minion to use to recover pillar
'''
# Get groups
# Default to returning something that'll never match
ret_groups = {
'default': {
'users': set(),
'commands': set(),
'aliases': {},
'default_target': {},
'targets': {}
}
}
# allow for empty groups in the config file, and instead let some/all of this come
# from pillar data.
if not groups_conf:
use_groups = {}
else:
use_groups = groups_conf
# First obtain group lists from pillars, then in case there is any overlap, iterate over the groups
# that come from pillars. The configuration in files on disk/from startup
# will override any configs from pillars. They are meant to be complementary not to provide overrides.
log.debug('use_groups %s', use_groups)
try:
groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items())
except AttributeError:
log.warning('Failed to get groups from %s: %s or from config: %s',
groups_pillar_name,
self._groups_from_pillar(groups_pillar_name),
use_groups
)
groups_gen = []
for name, config in groups_gen:
log.info('Trying to get %s and %s to be useful', name, config)
ret_groups.setdefault(name, {
'users': set(), 'commands': set(), 'aliases': {},
'default_target': {}, 'targets': {}
})
try:
ret_groups[name]['users'].update(set(config.get('users', [])))
ret_groups[name]['commands'].update(set(config.get('commands', [])))
ret_groups[name]['aliases'].update(config.get('aliases', {}))
ret_groups[name]['default_target'].update(config.get('default_target', {}))
ret_groups[name]['targets'].update(config.get('targets', {}))
except (IndexError, AttributeError):
log.warning("Couldn't use group %s. Check that targets is a dictionary and not a list", name)
log.debug('Got the groups: %s', ret_groups)
return ret_groups | 0.005638 |
def parameters(self):
"""Parameter declaration lines."""
lines = Lines()
lines.add(0, '@cython.final')
lines.add(0, 'cdef class Parameters(object):')
for subpars in self.model.parameters:
if subpars:
lines.add(1, 'cdef public %s %s'
% (objecttools.classname(subpars), subpars.name))
for subpars in self.model.parameters:
if subpars:
print(' - %s' % subpars.name)
lines.add(0, '@cython.final')
lines.add(0, 'cdef class %s(object):'
% objecttools.classname(subpars))
for par in subpars:
try:
ctype = TYPE2STR[par.TYPE] + NDIM2STR[par.NDIM]
except KeyError:
ctype = par.TYPE + NDIM2STR[par.NDIM]
lines.add(1, 'cdef public %s %s' % (ctype, par.name))
return lines | 0.002016 |
def _save_owner_cover_photo(session, hash, photo):
"""
https://vk.com/dev/photos.saveOwnerCoverPhoto
"""
response = session.fetch('photos.saveOwnerCoverPhoto', hash=hash, photo=photo)
return response | 0.012552 |
def register_func_list(self, func_and_handler):
""" register a function to determine if the handle
should be used for the type
"""
for func, handler in func_and_handler:
self._function_dispatch.register(func, handler)
self.dispatch.cache_clear() | 0.006645 |
def from_element(self, element, defaults={}):
"""Populate object variables from SVD element"""
if isinstance(defaults, SvdElement):
defaults = vars(defaults)
for key in self.props:
try:
value = element.find(key).text
except AttributeError: # Maybe it's attribute?
default = defaults[key] if key in defaults else None
value = element.get(key, default)
if value is not None:
if key in self.props_to_integer:
try:
value = int(value)
except ValueError: # It has to be hex
value = int(value, 16)
elif key in self.props_to_boolean:
value = value.lower() in ("yes", "true", "t", "1")
setattr(self, key, value) | 0.002281 |
def on_diff(request, page_name):
"""Show the diff between two revisions."""
old = request.args.get("old", type=int)
new = request.args.get("new", type=int)
error = ""
diff = page = old_rev = new_rev = None
if not (old and new):
error = "No revisions specified."
else:
revisions = dict(
(x.revision_id, x)
for x in Revision.query.filter(
(Revision.revision_id.in_((old, new)))
& (Revision.page_id == Page.page_id)
& (Page.name == page_name)
)
)
if len(revisions) != 2:
error = "At least one of the revisions requested does not exist."
else:
new_rev = revisions[new]
old_rev = revisions[old]
page = old_rev.page
diff = unified_diff(
(old_rev.text + "\n").splitlines(True),
(new_rev.text + "\n").splitlines(True),
page.name,
page.name,
format_datetime(old_rev.timestamp),
format_datetime(new_rev.timestamp),
3,
)
return Response(
generate_template(
"action_diff.html",
error=error,
old_revision=old_rev,
new_revision=new_rev,
page=page,
diff=diff,
)
) | 0.000724 |
def determine_eigen_directions(metricParams, preserveMoments=False,
vary_fmax=False, vary_density=None):
"""
    This function will calculate the coordinate transformations that are needed
to rotate from a coordinate system described by the various Lambda
components in the frequency expansion, to a coordinate system where the
metric is Cartesian.
Parameters
-----------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
preserveMoments : boolean, optional (default False)
Currently only used for debugging.
If this is given then if the moments structure is already set
within metricParams then they will not be recalculated.
vary_fmax : boolean, optional (default False)
If set to False the metric and rotations are calculated once, for the
full range of frequency [f_low,f_upper).
If set to True the metric and rotations are calculated multiple times,
for frequency ranges [f_low,f_low + i*vary_density), where i starts at
1 and runs up until f_low + (i+1)*vary_density > f_upper.
Thus values greater than f_upper are *not* computed.
The calculation for the full range [f_low,f_upper) is also done.
vary_density : float, optional
If vary_fmax is True, this will be used in computing the frequency
ranges as described for vary_fmax.
Returns
--------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
**THIS FUNCTION ONLY RETURNS THE CLASS**
The following will be **added** to this structure
metricParams.evals : Dictionary of numpy.array
Each entry in the dictionary corresponds to the different frequency
ranges described in vary_fmax. If vary_fmax = False, the only entry
will be f_upper, this corresponds to integrals in [f_low,f_upper). This
entry is always present. Each other entry will use floats as keys to
the dictionary. These floats give the upper frequency cutoff when it is
varying.
Each numpy.array contains the eigenvalues which, with the eigenvectors
in evecs, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
metricParams.evecs : Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the eigenvectors which, with the eigenvalues
in evals, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
metricParams.metric : Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the metric of the parameter space in the
Lambda_i coordinate system.
metricParams.moments : Moments structure
See the structure documentation for a description of this. This
contains the result of all the integrals used in computing the metrics
above. It can be used for the ethinca components calculation, or other
similar calculations.
"""
evals = {}
evecs = {}
metric = {}
unmax_metric = {}
# First step is to get the moments needed to calculate the metric
if not (metricParams.moments and preserveMoments):
get_moments(metricParams, vary_fmax=vary_fmax,
vary_density=vary_density)
# What values are going to be in the moments
# J7 is the normalization factor so it *MUST* be present
list = metricParams.moments['J7'].keys()
# We start looping over every item in the list of metrics
for item in list:
# Here we convert the moments into a form easier to use here
Js = {}
for i in range(-7,18):
Js[i] = metricParams.moments['J%d'%(i)][item]
logJs = {}
for i in range(-1,18):
logJs[i] = metricParams.moments['log%d'%(i)][item]
loglogJs = {}
for i in range(-1,18):
loglogJs[i] = metricParams.moments['loglog%d'%(i)][item]
logloglogJs = {}
for i in range(-1,18):
logloglogJs[i] = metricParams.moments['logloglog%d'%(i)][item]
loglogloglogJs = {}
for i in range(-1,18):
loglogloglogJs[i] = metricParams.moments['loglogloglog%d'%(i)][item]
mapping = generate_mapping(metricParams.pnOrder)
# Calculate the metric
gs, unmax_metric_curr = calculate_metric(Js, logJs, loglogJs,
logloglogJs, loglogloglogJs, mapping)
metric[item] = numpy.matrix(gs)
unmax_metric[item] = unmax_metric_curr
# And the eigenvalues
evals[item],evecs[item] = numpy.linalg.eig(gs)
# Numerical error can lead to small negative eigenvalues.
for i in range(len(evals[item])):
if evals[item][i] < 0:
# Due to numerical imprecision the very small eigenvalues can
# be negative. Make these positive.
evals[item][i] = -evals[item][i]
if evecs[item][i,i] < 0:
# We demand a convention that all diagonal terms in the matrix
# of eigenvalues are positive.
# This is done to help visualization of the spaces (increasing
# mchirp always goes the same way)
evecs[item][:,i] = - evecs[item][:,i]
metricParams.evals = evals
metricParams.evecs = evecs
metricParams.metric = metric
metricParams.time_unprojected_metric = unmax_metric
return metricParams | 0.002973 |
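A minimal numpy sketch (illustrative only, with a toy 2x2 metric rather than anything computed above) of how the eigenvalues and eigenvectors are used to rotate coordinates so that the metric becomes the identity:

import numpy as np

gs = np.array([[4.0, 1.0],
               [1.0, 3.0]])                 # toy symmetric, positive-definite metric
evals, evecs = np.linalg.eig(gs)
rot = np.diag(np.sqrt(evals)).dot(evecs.T)  # dx' = rot . dx
dx = np.array([0.3, -0.7])
# The squared distance is unchanged, so in the primed frame the metric is the identity.
assert np.isclose(dx.dot(gs).dot(dx), rot.dot(dx).dot(rot.dot(dx)))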
def annotate(self):
"""
        Returns a list of three-element tuples with lineno, changeset and line
"""
if self.changeset is None:
raise NodeError('Unable to get changeset for this FileNode')
return self.changeset.get_file_annotate(self.path) | 0.006944 |
def get_tagged_artists(self, tag, limit=None):
"""Returns the artists tagged by a user."""
params = self._get_params()
params["tag"] = tag
params["taggingtype"] = "artist"
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + ".getpersonaltags", True, params)
return _extract_artists(doc, self.network) | 0.005155 |
def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None will be assigned automatically ('unknown<%d>')
not in dmap.
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if it is not
specified as inputs in the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
return one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
            asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update(_call_kw(locals()))
self.deferred.append(('add_data', kwargs))
return self | 0.002057 |
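A hedged usage sketch, assuming the schedula package; the register() call that turns the deferred blueprint into a real Dispatcher is an assumption, since it is not shown above:

import schedula as sh

blue = sh.BlueDispatcher(name='demo')
# add_data returns self, so node definitions can be chained; each call is
# recorded in self.deferred and replayed later.
blue.add_data(data_id='a', default_value=1, description='first input')
blue.add_data(data_id='b', default_value=2)
dsp = blue.register()  # assumed API for materialising the blueprint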
def trace_debug_dispatch(self, frame, event, arg):
"""Utility function to add debug to tracing"""
trace_log.info(
'Frame:%s. Event: %s. Arg: %r' % (pretty_frame(frame), event, arg)
)
trace_log.debug(
'state %r breaks ? %s stops ? %s' % (
self.state, self.breaks(frame, no_remove=True),
self.state.stops(frame, event)
)
)
if event == 'return':
trace_log.debug(
'Return: frame: %s, state: %s, state.f_back: %s' % (
pretty_frame(frame), pretty_frame(self.state.frame),
pretty_frame(self.state.frame.f_back)
)
)
if self.trace_dispatch(frame, event, arg):
return self.trace_debug_dispatch
trace_log.debug("No trace %s" % pretty_frame(frame)) | 0.002283 |
def exception_handler(self, ex): # pylint: disable=no-self-use
""" The default exception handler """
if isinstance(ex, CLIError):
logger.error(ex)
else:
logger.exception(ex)
return 1 | 0.008368 |
def validate(self, value):
"""Validate field value."""
if value is not None:
if not isinstance(value, list):
raise ValidationError("field must be a list")
for index, element in enumerate(value):
try:
self.inner.validate(element)
except ValidationError as error:
raise ValidationError("invalid element {}: {}".format(
index,
error.args[0],
))
super().validate(value) | 0.003503 |
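A self-contained sketch of the same element-wise validation pattern; ListField and IntField here are hypothetical stand-ins, not the library's actual field classes:

class ValidationError(Exception):
    pass

class IntField:
    def validate(self, value):
        if not isinstance(value, int):
            raise ValidationError("field must be an int")

class ListField:
    def __init__(self, inner):
        self.inner = inner  # field used to validate each element
    def validate(self, value):
        if value is None:
            return
        if not isinstance(value, list):
            raise ValidationError("field must be a list")
        for index, element in enumerate(value):
            try:
                self.inner.validate(element)
            except ValidationError as error:
                raise ValidationError("invalid element {}: {}".format(
                    index, error.args[0]))

try:
    ListField(IntField()).validate([1, 2, "x"])
except ValidationError as exc:
    print(exc)  # invalid element 2: field must be an int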
def s_supply(self, bus):
""" Returns the total complex power generation capacity.
"""
Sg = array([complex(g.p, g.q) for g in self.generators if
(g.bus == bus) and not g.is_load], dtype=complex64)
if len(Sg):
return sum(Sg)
else:
return 0 + 0j | 0.006135 |
def get_build_logs_zip(self, project, build_id, **kwargs):
"""GetBuildLogsZip.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.0',
route_values=route_values,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) | 0.004931 |
def save(self, filename):
'''save fence points to a file'''
f = open(filename, mode='w')
for p in self.points:
f.write("%f\t%f\n" % (p.lat, p.lng))
f.close() | 0.00995 |
def serialized(f):
"""Decorator that serializes access to all decorated functions.
The decorator acquires pyspotify's single global lock while calling any
wrapped function. It is used to serialize access to:
- All calls to functions on :attr:`spotify.lib`.
- All code blocks working on pointers returned from functions on
:attr:`spotify.lib`.
- All code blocks working on other internal data structures in pyspotify.
Together this is what makes pyspotify safe to use from multiple threads and
enables convenient features like the :class:`~spotify.EventLoop`.
Internal function.
"""
import functools
@functools.wraps(f)
def wrapper(*args, **kwargs):
with _lock:
return f(*args, **kwargs)
if not hasattr(wrapper, '__wrapped__'):
# Workaround for Python < 3.2
wrapper.__wrapped__ = f
return wrapper | 0.001106 |
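A standalone sketch of the same global-lock pattern; the _lock object here is a stand-in for pyspotify's actual lock, which is defined elsewhere:

import functools
import threading

_lock = threading.RLock()  # stand-in for the single global lock

def serialized(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with _lock:  # every decorated call runs with the lock held
            return f(*args, **kwargs)
    return wrapper

@serialized
def touch_shared_state(value):
    # Concurrent callers from different threads are serialized here.
    return value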
def fit(self, choosers, alternatives, current_choice):
"""
Fit and save models based on given data after segmenting
the `choosers` table. Segments that have not already been explicitly
added will be automatically added with default model.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
            log-likelihood values as returned by MNLDiscreteChoiceModel.fit.
"""
logger.debug('start: fit models in segmented LCM {}'.format(self.name))
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
unique = choosers[self.segmentation_col].unique()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models:
self.add_segment(x)
results = self._group.fit(choosers, alternatives, current_choice)
logger.debug(
'finish: fit models in segmented LCM {}'.format(self.name))
return results | 0.001034 |
def _cursor_helper(self, document_fields, before, start):
"""Set values to be used for a ``start_at`` or ``end_at`` cursor.
The values will later be used in a query protobuf.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
before (bool): Flag indicating if the document in
``document_fields`` should (:data:`False`) or
shouldn't (:data:`True`) be included in the result set.
start (Optional[bool]): determines if the cursor is a ``start_at``
cursor (:data:`True`) or an ``end_at`` cursor (:data:`False`).
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start at" cursor.
"""
if isinstance(document_fields, tuple):
document_fields = list(document_fields)
elif isinstance(document_fields, document.DocumentSnapshot):
if document_fields.reference._path[:-1] != self._parent._path:
raise ValueError(
"Cannot use snapshot from another collection as a cursor."
)
else:
# NOTE: We copy so that the caller can't modify after calling.
document_fields = copy.deepcopy(document_fields)
cursor_pair = document_fields, before
query_kwargs = {
"projection": self._projection,
"field_filters": self._field_filters,
"orders": self._orders,
"limit": self._limit,
"offset": self._offset,
}
if start:
query_kwargs["start_at"] = cursor_pair
query_kwargs["end_at"] = self._end_at
else:
query_kwargs["start_at"] = self._start_at
query_kwargs["end_at"] = cursor_pair
return self.__class__(self._parent, **query_kwargs) | 0.000833 |
def __display_left(self, stat_display):
"""Display the left sidebar in the Curses interface."""
self.init_column()
if self.args.disable_left_sidebar:
return
for s in self._left_sidebar:
if ((hasattr(self.args, 'enable_' + s) or
hasattr(self.args, 'disable_' + s)) and s in stat_display):
self.new_line()
self.display_plugin(stat_display[s]) | 0.004474 |
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
string = "gate " + self.name
if self.arguments is not None:
string += "(" + self.arguments.qasm(prec) + ")"
string += " " + self.bitlist.qasm(prec) + "\n"
string += "{\n" + self.body.qasm(prec) + "}"
return string | 0.00578 |
def acceptEdit( self ):
"""
Accepts the current text and rebuilds the parts widget.
"""
if ( self._partsWidget.isVisible() ):
return False
use_completion = self.completer().popup().isVisible()
completion = self.completer().currentCompletion()
self._completerTree.hide()
self.completer().popup().hide()
if ( use_completion ):
self.setText(completion)
else:
self.rebuild()
return True | 0.025045 |
def may_add_vlan(packet, vlan_id):
"""
:type packet: ryu.lib.packet.packet.Packet
:param packet:
:type vlan_id: int (0 <= vlan_id <= 4095) or None (= No VLAN)
:param vlan_id:
"""
if vlan_id is None:
return
e = packet.protocols[0]
assert isinstance(e, ethernet.ethernet)
v = vlan.vlan(0, 0, vlan_id, e.ethertype)
e.ethertype = ether.ETH_TYPE_8021Q
packet.add_protocol(v) | 0.002347 |
def arrays2wcxf(C):
"""Convert a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values to a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values. This is needed for the output in WCxf format."""
d = {}
for k, v in C.items():
if np.shape(v) == () or np.shape(v) == (1,):
d[k] = v
else:
ind = np.indices(v.shape).reshape(v.ndim, v.size).T
for i in ind:
name = k + '_' + ''.join([str(int(j) + 1) for j in i])
d[name] = v[tuple(i)]
return d | 0.001555 |
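An illustrative call, assuming numpy is imported as np (as the function body already implies):

C = {'C9': 1.2 + 0.3j,
     'Cll': np.array([[1.0, 2.0],
                      [3.0, 4.0]])}
arrays2wcxf(C)
# -> {'C9': (1.2+0.3j), 'Cll_11': 1.0, 'Cll_12': 2.0, 'Cll_21': 3.0, 'Cll_22': 4.0}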
def normalize(self, decl_string, arg_separator=None):
"""implementation details"""
if not self.has_pattern(decl_string):
return decl_string
name, args = self.split(decl_string)
for i, arg in enumerate(args):
args[i] = self.normalize(arg)
return self.join(name, args, arg_separator) | 0.005797 |
def _get_inferred_data_column(column):
"""
Calculate the m/m/m/m for column values.
:param dict column: Column data
:return dict column: Column data - modified
"""
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Get the values for this column
values = column["values"]
# Make sure that age and values are numpy arrays
_values = np.array(copy.copy(values), dtype=float)
# If we have values, keep going
if len(_values) != 0:
# Remove the NaNs from the values list.
_values = _values[np.where(~np.isnan(_values))[0]]
# Use the values to create new entries and data
column.update(__get_inferred_data_res_2(_values))
# Even though we're not calculating resolution, still add it with "NaN" placeholders.
column["hasResolution"] = __get_inferred_data_res_2(None, calc=False)
except KeyError as e:
logger_inferred_data.debug("get_inferred_data_column: KeyError: {}".format(e))
except Exception as e:
logger_inferred_data.debug("get_inferred_data_column: Exception: {}".format(e))
return column | 0.004023 |
def start_state_id(self):
""" The start state is the state to which the first transition goes to.
The setter-method creates a unique first transition to the state with the given id.
Existing first transitions are removed. If the given state id is None, the first transition is removed.
:return: The id of the start state
"""
for transition_id in self.transitions:
if self.transitions[transition_id].from_state is None:
to_state = self.transitions[transition_id].to_state
if to_state is not None:
return to_state
else:
return self.state_id
return None | 0.00565 |
def _get_sync_model_vars_op(self):
"""
Get the op to sync local model_variables to PS.
"""
ops = []
for (shadow_v, local_v) in self._shadow_model_vars:
ops.append(shadow_v.assign(local_v.read_value()))
assert len(ops)
return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops))) | 0.008287 |
def _order(self, value, is_reverse=None):
"""Parsing data to a sortable form
By giving each data type an ID(int), and assemble with the value
into a sortable tuple.
"""
def _dict_parser(dict_doc):
""" dict ordered by:
valueType_N -> key_N -> value_N
"""
result = list()
for key in dict_doc:
data = self._order(dict_doc[key])
res = (data[0], key, data[1])
result.append(res)
return tuple(result)
def _list_parser(list_doc):
"""list will iter members to compare
"""
result = list()
for member in list_doc:
result.append(self._order(member))
return result
# (TODO) include more data type
if value is None or not isinstance(value, (dict,
list,
basestring,
bool,
float,
int)):
# not support/sortable value type
value = (0, None)
elif isinstance(value, bool):
value = (5, value)
elif isinstance(value, (int, float)):
value = (1, value)
elif isinstance(value, basestring):
value = (2, value)
elif isinstance(value, dict):
value = (3, _dict_parser(value))
elif isinstance(value, list):
if len(value) == 0:
                # [] less than None
value = [(-1, [])]
else:
value = _list_parser(value)
if is_reverse is not None:
                # list will first compare with the other doc by its smallest
# or largest member
value = max(value) if is_reverse else min(value)
else:
# if the smallest or largest member is a list
                # then compare with its sub-member in list index order
value = (4, tuple(value))
return value | 0.000905 |
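The type IDs assigned above give sort keys such as the following (illustrative values only, assuming self._order as defined):

# self._order(None)      -> (0, None)   unsupported or missing values sort first
# self._order(3)         -> (1, 3)
# self._order('abc')     -> (2, 'abc')
# self._order({'a': 1})  -> (3, ((1, 'a', 1),))
# self._order([2, 'x'])  -> (4, ((1, 2), (2, 'x')))
# self._order(True)      -> (5, True)
# so sorted(docs, key=self._order) ranks None < numbers < strings < dicts < lists < bools.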
def getBranch(self, name, **context):
"""Return a branch of this tree where the 'name' OID may reside"""
for keyLen in self._vars.getKeysLens():
subName = name[:keyLen]
if subName in self._vars:
return self._vars[subName]
raise error.NoSuchObjectError(name=name, idx=context.get('idx')) | 0.005698 |
def convert_gempak_color(c, style='psc'):
"""Convert GEMPAK color numbers into corresponding Matplotlib colors.
Takes a sequence of GEMPAK color numbers and turns them into
equivalent Matplotlib colors. Various GEMPAK quirks are respected,
such as treating negative values as equivalent to 0.
Parameters
----------
c : int or sequence of ints
GEMPAK color number(s)
style : str, optional
The GEMPAK 'device' to use to interpret color numbers. May be 'psc'
(the default; best for a white background) or 'xw' (best for a black background).
Returns
-------
List of strings of Matplotlib colors, or a single string if only one color requested.
"""
def normalize(x):
"""Transform input x to an int in range 0 to 31 consistent with GEMPAK color quirks."""
x = int(x)
if x < 0 or x == 101:
x = 0
else:
x = x % 32
return x
# Define GEMPAK colors (Matplotlib doesn't appear to like numbered variants)
cols = ['white', # 0/32
'black', # 1
'red', # 2
'green', # 3
'blue', # 4
'yellow', # 5
'cyan', # 6
'magenta', # 7
'#CD6839', # 8 (sienna3)
'#FF8247', # 9 (sienna1)
'#FFA54F', # 10 (tan1)
'#FFAEB9', # 11 (LightPink1)
'#FF6A6A', # 12 (IndianRed1)
'#EE2C2C', # 13 (firebrick2)
'#8B0000', # 14 (red4)
'#CD0000', # 15 (red3)
'#EE4000', # 16 (OrangeRed2)
'#FF7F00', # 17 (DarkOrange1)
'#CD8500', # 18 (orange3)
'gold', # 19
'#EEEE00', # 20 (yellow2)
'chartreuse', # 21
'#00CD00', # 22 (green3)
'#008B00', # 23 (green4)
'#104E8B', # 24 (DodgerBlue4)
'DodgerBlue', # 25
'#00B2EE', # 26 (DeepSkyBlue2)
'#00EEEE', # 27 (cyan2)
'#8968CD', # 28 (MediumPurple3)
'#912CEE', # 29 (purple2)
'#8B008B', # 30 (magenta4)
'bisque'] # 31
if style != 'psc':
if style == 'xw':
cols[0] = 'black'
cols[1] = 'bisque'
cols[31] = 'white'
else:
raise ValueError('Unknown style parameter')
try:
c_list = list(c)
res = [cols[normalize(x)] for x in c_list]
except TypeError:
res = cols[normalize(c)]
return res | 0.001888 |
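Illustrative calls, assuming the function above is in scope; the results follow directly from normalize() and the colour table:

convert_gempak_color(2)               # 'red'
convert_gempak_color([5, 33, -1])     # ['yellow', 'black', 'white']: 33 wraps to 1, -1 maps to 0
convert_gempak_color(31, style='xw')  # 'white' on the black-background device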
def _set_source(self, v, load=False):
"""
Setter method for source, mapped from YANG variable /overlay_class_map/cmap_seq/match/source (ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_source is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="source", rest_name="source", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-src'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source IPv4 Address: A.B.C.D'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source must be of a type compatible with ipv4-address""",
'defined-type': "brocade-overlay-policy:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="source", rest_name="source", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-src'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source IPv4 Address: A.B.C.D'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='ipv4-address', is_config=True)""",
})
self.__source = t
if hasattr(self, '_set'):
self._set() | 0.004808 |
def unregister(self, observers):
u"""
Concrete method of Subject.unregister().
        Unregister the given observers from self._observers.
"""
if isinstance(observers, list) or isinstance(observers, tuple):
for observer in observers:
try:
index = self._observers.index(observer)
self._observers.remove(self._observers[index])
except ValueError:
# logging
                    print('{observer} not in list...'.format(observer=observer))
elif isinstance(observers, base.Observer):
try:
index = self._observers.index(observers)
self._observers.remove(self._observers[index])
except ValueError:
# logging
                print('{observer} not in list...'.format(observer=observers))
else:
            err_message = ('ConfigReader.register supports '
                           'ListType, TupleType and {observer} objects.'
                           .format(observer=base.Observer.__name__)
                           )
raise ValueError(err_message) | 0.001724 |
def kms_to_kpcGyrDecorator(func):
"""Decorator to convert velocities from km/s to kpc/Gyr"""
@wraps(func)
def kms_to_kpcGyr_wrapper(*args,**kwargs):
return func(args[0],velocity_in_kpcGyr(args[1],1.),args[2],**kwargs)
return kms_to_kpcGyr_wrapper | 0.022222 |
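A rough standalone check of the underlying unit conversion (constants are approximate; this is not galpy's velocity_in_kpcGyr helper itself):

KM_PER_KPC = 3.0857e16  # kilometres in one kiloparsec
S_PER_GYR = 3.1557e16   # seconds in one gigayear

def kms_to_kpcgyr(v_kms):
    # 1 km/s is roughly 1.023 kpc/Gyr
    return v_kms * S_PER_GYR / KM_PER_KPC

print(kms_to_kpcgyr(220.0))  # ~225.0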
def clear(self):
"""Clear task output: remove value ``budget_inflation_adjusted`` from all :class:`Movie` objects.
"""
self.mark_incomplete()
session = client.get_client().create_session()
movies = session.query(models.Movie)
movies.update({'budget_inflation_adjusted': None})
session.commit()
session.close() | 0.008 |
def valid(self,individuals=None,F=None):
"""returns the sublist of individuals with valid fitness."""
if F:
valid_locs = self.valid_loc(F)
else:
valid_locs = self.valid_loc(self.F)
if individuals:
return [ind for i,ind in enumerate(individuals) if i in valid_locs]
else:
return [ind for i,ind in enumerate(self.pop.individuals) if i in valid_locs] | 0.016055 |