text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def obtain_hosting_device_credentials_from_config():
"""Obtains credentials from config file and stores them in memory.
To be called before hosting device templates defined in the config file
are created.
"""
cred_dict = get_specific_config('cisco_hosting_device_credential')
attr_info = {
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None}, 'is_visible': True,
'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'user_name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'password': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'type': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None}, 'is_visible': True,
'default': ''}}
credentials = {}
for cred_uuid, kv_dict in cred_dict.items():
# ensure cred_uuid is properly formatted
cred_uuid = uuidify(cred_uuid)
verify_resource_dict(kv_dict, True, attr_info)
credentials[cred_uuid] = kv_dict
return credentials | 0.000693 |
def remove_range(self, start, end):
'''Remove a range by score.
'''
return self._sl.remove_range(
start, end, callback=lambda sc, value: self._dict.pop(value)) | 0.010256 |
def _win32_strerror(err):
""" expand a win32 error code into a human readable message """
# FormatMessage will allocate memory and assign it here
buf = ctypes.c_char_p()
FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM
| FORMAT_MESSAGE_ALLOCATE_BUFFER
| FORMAT_MESSAGE_IGNORE_INSERTS,
None,
err,
0,
buf,
0,
None,
)
try:
return buf.value
finally:
LocalFree(buf) | 0.002128 |
def __get_overall_data(self, x):
"""
(recursive) Collect all "sensorGenus", "sensorSpecies", "archiveType", and "QCnotes" fields and store them on self
:param any x: Any data type
:return: the input data, returned unchanged
"""
if isinstance(x, dict):
if "sensorGenus" in x:
if x["sensorGenus"] and x["sensorGenus"] not in self.lsts_tmp["genus"]:
self.lsts_tmp["genus"].append(x["sensorGenus"])
if "sensorSpecies" in x:
if x["sensorSpecies"] and x["sensorSpecies"] not in self.lsts_tmp["species"]:
self.lsts_tmp["species"].append(x["sensorSpecies"])
if "archiveType" in x:
if x["archiveType"] and x["archiveType"] not in self.lsts_tmp["archive"]:
self.lsts_tmp["archive"].append(x["archiveType"])
if "QCnotes" in x:
if x["QCnotes"] and x["QCnotes"] not in self.lsts_tmp["qc"]:
self.lsts_tmp["qc"].append(x["QCnotes"])
for k, v in x.items():
if isinstance(v, dict):
self.__get_overall_data(v)
elif isinstance(v, list):
self.__get_overall_data(v)
elif isinstance(x, list):
for i in x:
self.__get_overall_data(i)
return x | 0.004525 |
def drawdown_recov(self, return_int=False):
"""Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
"""
td = self.recov_date() - self.drawdown_end()
if return_int:
return td.days
return td | 0.003597 |
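# A minimal sketch of the return-type distinction documented in drawdown_recov
# above, using made-up trough/recovery dates (not taken from any real object):
import pandas as pd

recov_date = pd.Timestamp("2020-08-10")
drawdown_end = pd.Timestamp("2020-03-23")
td = recov_date - drawdown_end
print(td)        # Timedelta('140 days 00:00:00'), the return_int=False case
print(td.days)   # 140, the return_int=True case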
def config(self):
"""Get a Configuration object from the file contents."""
conf = config.Configuration()
for namespace in self.namespaces:
if not hasattr(conf, namespace):
if not self._strict:
continue
raise exc.NamespaceNotRegistered(
"The namespace {0} is not registered.".format(namespace)
)
name = getattr(conf, namespace)
for item, value in compat.iteritems(self.items(namespace)):
if not hasattr(name, item):
if not self._strict:
continue
raise exc.OptionNotRegistered(
"The option {0} is not registered.".format(item)
)
setattr(name, item, value)
return conf | 0.002291 |
def to_csv(data, field_names=None, filename='data.csv',
overwrite=True,
write_headers=True, append=False, flat=True,
primary_fields=None, sort_fields=True):
"""
DEPRECATED Write a list of dicts to a csv file
:param data: List of dicts
:param field_names: The list of column names
:param filename: The name of the file
:param overwrite: Overwrite the file if exists
:param write_headers: Write the headers to the csv file
:param append: Write new rows if the file exists
:param flat: Flatten the dictionary before saving
:param primary_fields: The first columns of the csv file
:param sort_fields: Sort the field names alphabetically
:return: None
"""
# Don't overwrite if not specified
if not overwrite and path.isfile(filename):
raise FileExistsError('The file already exists')
# Replace file if append not specified
write_type = 'w' if not append else 'a'
# Flatten if flat is specified, or there are no predefined field names
if flat or not field_names:
data = [flatten(datum) for datum in data]
# Fill in gaps between dicts with empty string
if not field_names:
field_names, data = fill_gaps(data)
# Sort fields if specified
if sort_fields:
field_names.sort()
# If there are primary fields, move the field names to the front and sort
# based on first field
if primary_fields:
for key in primary_fields[::-1]:
field_names.insert(0, field_names.pop(field_names.index(key)))
data = sorted(data, key=lambda k: k[field_names[0]], reverse=True)
# Write the file
with open(filename, write_type, encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='\n')
if not append or write_headers:
writer.writeheader()
# Write rows containing fields in field names
for datum in data:
for key in list(datum.keys()):
if key not in field_names:
del datum[key]
elif type(datum[key]) is str:
datum[key] = datum[key].strip()
datum[key] = str(datum[key])
writer.writerow(datum) | 0.00043 |
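# A usage sketch for to_csv above. It assumes the function and its module-level
# imports (csv, os.path as path) are in scope; passing flat=False together with
# explicit field_names avoids the flatten()/fill_gaps() helpers, which are not
# shown in this snippet.
rows = [
    {'name': 'alpha', 'value': 1},
    {'name': 'beta', 'value': 2},
]
to_csv(rows, field_names=['name', 'value'], filename='example.csv', flat=False)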
async def _init_writer(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
async with self._initialization_lock:
if not self.initialized:
self.stream = await aiofiles.open(
file=self.absolute_file_path,
mode=self.mode,
encoding=self.encoding,
loop=self.loop,
) | 0.004149 |
def GetClosestPoint(x, a, b):
"""
Returns the point on the great circle segment ab closest to x.
"""
assert(x.IsUnitLength())
assert(a.IsUnitLength())
assert(b.IsUnitLength())
a_cross_b = a.RobustCrossProd(b)
# project to the great circle going through a and b
p = x.Minus(
a_cross_b.Times(
x.DotProd(a_cross_b) / a_cross_b.Norm2()))
# if p lies between a and b, return it
if SimpleCCW(a_cross_b, a, p) and SimpleCCW(p, b, a_cross_b):
return p.Normalize()
# otherwise return the closer of a or b
if x.Minus(a).Norm2() <= x.Minus(b).Norm2():
return a
else:
return b | 0.022617 |
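# A NumPy re-expression of the same great-circle math as GetClosestPoint above,
# included as an illustrative sketch: RobustCrossProd/SimpleCCW are replaced by
# plain cross and triple products, so this is not the original library's API.
import numpy as np

def closest_point_on_arc(x, a, b):
    """x, a, b are unit 3-vectors; return the point on arc a->b closest to x."""
    a_cross_b = np.cross(a, b)
    # project x onto the plane of the great circle through a and b
    p = x - a_cross_b * (np.dot(x, a_cross_b) / np.dot(a_cross_b, a_cross_b))
    p = p / np.linalg.norm(p)
    # p lies on the arc iff both triple products below are positive
    if np.dot(np.cross(a_cross_b, a), p) > 0 and np.dot(np.cross(p, b), a_cross_b) > 0:
        return p
    # otherwise the closest point is the nearer endpoint
    return a if np.dot(x - a, x - a) <= np.dot(x - b, x - b) else b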
def run_shell_command(commands, **kwargs):
"""Run a shell command."""
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
output, error = p.communicate()
return p.returncode, output, error | 0.003165 |
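# Usage sketch for run_shell_command above, assuming the function and the
# subprocess import are in scope (output and error come back as bytes):
code, out, err = run_shell_command(['echo', 'hello'])
print(code, out.strip())   # 0 b'hello'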
def register(self, classes=[]):
"""
Registers new plugins.
The registration only creates a new entry for a plugin inside the _classes dictionary.
It does not activate or even initialise the plugin.
A plugin must be a class, which inherits directly or indirectly from GwBasePattern.
:param classes: List of plugin classes
:type classes: list
"""
if not isinstance(classes, list):
raise AttributeError("plugins must be a list, not %s." % type(classes))
plugin_registered = []
for plugin_class in classes:
plugin_name = plugin_class.__name__
self.register_class(plugin_class, plugin_name)
self._log.debug("Plugin %s registered" % plugin_name)
plugin_registered.append(plugin_name)
self._log.info("Plugins registered: %s" % ", ".join(plugin_registered)) | 0.005488 |
def get_species(species_id):
''' Return a single species '''
result = _get(species_id, settings.SPECIES)
return Species(result.content) | 0.006803 |
def open_target_group_for_form(self, form):
"""
Makes sure that the first group that should be open is open.
This is either the first group with errors or the first group
in the container, unless that first group was originally set to
active=False.
"""
target = self.first_container_with_errors(form.errors.keys())
if target is None:
target = self.fields[0]
if not getattr(target, '_active_originally_included', None):
target.active = True
return target
target.active = True
return target | 0.003221 |
def dfs_grid(grid, i, j, mark='X', free='.'):
"""DFS on a grid, mark connected component, iterative version
:param grid: matrix, 4-neighborhood
:param i,j: cell in this matrix, start of DFS exploration
:param free: symbol for walkable cells
:param mark: symbol to overwrite visited vertices
:complexity: linear
"""
height = len(grid)
width = len(grid[0])
to_visit = [(i, j)]
grid[i][j] = mark
while to_visit:
i1, j1 = to_visit.pop()
for i2, j2 in [(i1 + 1, j1), (i1, j1 + 1),
(i1 - 1, j1), (i1, j1 - 1)]:
if (0 <= i2 < height and 0 <= j2 < width and
grid[i2][j2] == free):
grid[i2][j2] = mark # mark path
to_visit.append((i2, j2)) | 0.001277 |
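# Usage sketch for dfs_grid above, assuming the function is in scope. The grid
# must be a mutable matrix (lists of characters); a distinct mark symbol is
# used here so the flooded component stands out from the 'X' obstacles:
grid = [list(".X.."),
        list("..X."),
        list("....")]
dfs_grid(grid, 0, 0, mark='#')
print(["".join(row) for row in grid])   # ['#X##', '##X#', '####']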
def append(self, content, encoding='utf8'):
"""
add a line to file
"""
if not self.parent.exists:
self.parent.create()
with open(self._filename, "ab") as output_file:
if not is_text(content):
Log.error(u"expecting to write unicode only")
output_file.write(content.encode(encoding))
output_file.write(b"\n") | 0.004878 |
def delete(self, service, path, **kwargs):
""" Make a delete requests (this returns a coroutine)"""
return self.make_request(Methods.DELETE, service, path, **kwargs) | 0.01105 |
def parse(self, src):
"""Parses CSS string source using the current cssBuilder.
Use for embedded stylesheets."""
self.cssBuilder.beginStylesheet()
try:
# XXX Some simple preprocessing
src = cssSpecial.cleanupCSS(src)
try:
src, stylesheet = self._parseStylesheet(src)
except self.ParseError as err:
err.setFullCSSSource(src)
raise
finally:
self.cssBuilder.endStylesheet()
return stylesheet | 0.003656 |
def assign_method(stochastic, scale=None, verbose=-1):
"""
Returns a step method instance to handle a
variable. If several methods have the same competence,
it picks one arbitrarily (using set.pop()).
"""
# Retrieve set of best candidates
best_candidates = pick_best_methods(stochastic)
# Randomly grab an appropriate method
method = best_candidates.pop()
failure_header = """Failed attempting to automatically assign step method class %s
to stochastic variable %s. Try setting %s's competence method to return 0
and manually assigning it when appropriate. See the user guide.
Error message: """ % (method.__name__, stochastic.__name__, method.__name__)
try:
if scale:
out = method(stochastic, scale=scale, verbose=verbose)
else:
out = method(stochastic, verbose=verbose)
except:
a, b, c = sys.exc_info()
try:
args = list(b.args)
except AttributeError:
args = []
args.append(failure_header)
b.args = args
six.reraise(a, b, c)
return out | 0.002712 |
def enable_capture_state(self, state, writeToHw=False):
"""
Enable/Disable capture on resource group
"""
if state:
activePorts = self.rePortInList.findall(self.activePortList)
self.activeCapturePortList = "{{" + activePorts[0] + "}}"
else:
self.activeCapturePortList = "{{""}}"
if (writeToHw):
self.ix_command('write') | 0.004831 |
def _make_outputnode(self, frequency):
"""
Generates an output node for the given frequency. It also adds implicit
file format conversion nodes to the pipeline.
Parameters
----------
frequency : str
The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or
'per_study') of the output node to retrieve
"""
# Check to see whether there are any outputs for the given frequency
outputs = list(self.frequency_outputs(frequency))
if not outputs:
raise ArcanaError(
"No outputs to '{}' pipeline for requested freqency '{}'"
.format(self.name, frequency))
# Get list of output names for the requested frequency, adding fields
# to hold iterator IDs
output_names = [o.name for o in outputs]
# Generate output node and connect it to appropriate nodes
outputnode = self.add('{}_outputnode'.format(frequency),
IdentityInterface(fields=output_names))
# Loop through list of nodes connected to study data specs and
# connect them to the newly created output node
for output in outputs: # @ReservedAssignment
(node, node_out, format, # @ReservedAssignment @IgnorePep8
conv_kwargs) = self._output_conns[output.name]
# If fileset formats differ between study and pipeline
# outputs create converter node (if one hasn't been already)
# and connect output to that before connecting to outputnode
if self.requires_conversion(output, format):
conv = output.format.converter_from(format, **conv_kwargs)
node = self.add(
'conv_{}_from_{}_format'.format(output.name, format.name),
conv.interface,
inputs={conv.input: (node, node_out)},
requirements=conv.requirements,
mem_gb=conv.mem_gb,
wall_time=conv.wall_time)
node_out = conv.output
self.connect(node, node_out, outputnode, output.name)
return outputnode | 0.00091 |
def make_checksum_validation_script(stats_list):
"""Make batch files required for checking checksums from another machine."""
if not os.path.exists('./hash_check'):
os.mkdir('./hash_check')
with open('./hash_check/curl.sh', 'w') as curl_f, open(
'./hash_check/md5.txt', 'w'
) as md5_f, open('./hash_check/sha1.txt', 'w') as sha1_f:
curl_f.write('#!/usr/bin/env bash\n\n')
for stats_dict in stats_list:
for sysmeta_xml in stats_dict['largest_sysmeta_xml']:
print(sysmeta_xml)
sysmeta_pyxb = d1_common.types.dataoneTypes_v1_2.CreateFromDocument(
sysmeta_xml
)
pid = sysmeta_pyxb.identifier.value().encode('utf-8')
file_name = re.sub('\W+', '_', pid)
size = sysmeta_pyxb.size
base_url = stats_dict['gmn_dict']['base_url']
if size > 100 * 1024 * 1024:
logging.info('Ignored large object. size={} pid={}'.format(size, pid))
curl_f.write('# {} {}\n'.format(size, pid))
curl_f.write(
'curl -o obj/{} {}/v1/object/{}\n'.format(
file_name, base_url, d1_common.url.encodePathElement(pid)
)
)
if sysmeta_pyxb.checksum.algorithm == 'MD5':
md5_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
else:
sha1_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
with open('./hash_check/check.sh', 'w') as f:
f.write('#!/usr/bin/env bash\n\n')
f.write('mkdir -p obj\n')
f.write('./curl.sh\n')
f.write('sha1sum -c sha1.txt\n')
f.write('md5sum -c md5.txt\n') | 0.003652 |
def parameters(self) -> List['Parameter']:
"""Return a list of parameter objects."""
_lststr = self._lststr
_type_to_spans = self._type_to_spans
return [
Parameter(_lststr, _type_to_spans, span, 'Parameter')
for span in self._subspans('Parameter')] | 0.006579 |
def loadJSON(self, jdata):
"""
Initializes the information for this class from the given JSON data blob.
:param jdata: <dict>
"""
# required params
self.__name = jdata['name']
self.__field = jdata['field']
# optional fields
self.__display = jdata.get('display') or self.__display
self.__flags = jdata.get('flags') or self.__flags
self.__defaultOrder = jdata.get('defaultOrder') or self.__defaultOrder
self.__default = jdata.get('default') or self.__default | 0.005415 |
def _get_unit_factor(cls, unit):
"""
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
"""
try:
if isinstance(unit, str):
unit = cls.UNIT_FACTOR_NAMES[unit]
return cls.UNIT_FACTORS[unit]
except KeyError:
raise UnsupportedUnitError() | 0.005102 |
def remove_translation(self, context_id, translation_id):
"""Removes a translation entry from a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int translation_id: The id-value representing the translation.
:return bool: True if translation entry removal was successful.
"""
return self.context.deleteAddressTranslation(translation_id,
id=context_id) | 0.004016 |
def inverse_kinematics(self, target, initial_position=None, **kwargs):
"""Computes the inverse kinematic on the specified target
Parameters
----------
target: numpy.array
The frame target of the inverse kinematic, in meters. It must be 4x4 transformation matrix
initial_position: numpy.array
Optional : the initial position of each joint of the chain. Defaults to 0 for each joint
Returns
-------
The list of the positions of each joint according to the target. Note : Inactive joints are in the list.
"""
# Checks on input
target = np.array(target)
if target.shape != (4, 4):
raise ValueError("Your target must be a 4x4 transformation matrix")
if initial_position is None:
initial_position = [0] * len(self.links)
return ik.inverse_kinematic_optimization(self, target, starting_nodes_angles=initial_position, **kwargs) | 0.006104 |
def mpub(self, topic, *messages):
'''Publish multiple messages to a topic'''
return self.send(constants.MPUB + ' ' + topic, messages) | 0.013423 |
def build_circles(self, circles):
""" Process data to construct rectangles
This method is built from the assumption that the circles parameter
is a list of:
lists : a list with 3 elements indicating
[center_latitude, center_longitude, radius]
tuples : a tuple with 3 elements indicating
(center_latitude, center_longitude, radius)
dicts: a dictionary with circle attributes
So, for instance, we have this general scenario as a input parameter:
[[22.345,45.44, 1000],
(22.345,45.44,200),
{
'stroke_color': stroke_color,
'stroke_opacity': stroke_opacity,
'stroke_weight': stroke_weight,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'center': {'lat': center_latitude,
'lng': center_longitude,
},
'radius': radius
}]
"""
if not circles:
return
if not isinstance(circles, list):
raise AttributeError('circles accepts only lists')
for circle in circles:
if isinstance(circle, dict):
self.add_circle(**circle)
elif isinstance(circle, (tuple, list)):
if len(circle) != 3:
raise AttributeError('circle requires center and radius')
circle_dict = self.build_circle_dict(circle[0],
circle[1],
circle[2])
self.add_circle(**circle_dict) | 0.001186 |
def read_envvar_file(name, extension):
"""
Read values from a file provided as an environment variable
``NAME_CONFIG_FILE``.
:param name: environment variable prefix to look for (without the
``_CONFIG_FILE``)
:param extension: *(unused)*
:return: a `.Configuration`, possibly `.NotConfigured`
"""
envvar_file = environ.get('{}_config_file'.format(name).upper())
if envvar_file:
# envvar set, load value as file
return loadf(envvar_file)
else:
# envvar not set, return an empty source
return NotConfigured | 0.001712 |
def update_kwargs(kwargs, **keyvalues):
"""Update dict with keys and values if keys do not already exist.
>>> kwargs = {'one': 1, }
>>> update_kwargs(kwargs, one=None, two=2)
>>> kwargs == {'one': 1, 'two': 2}
True
"""
for key, value in keyvalues.items():
if key not in kwargs:
kwargs[key] = value | 0.002882 |
def fast_ordering(self, structure, num_remove_dict, num_to_return=1):
"""
This method uses the matrix form of ewaldsum to calculate the ewald
sums of the potential structures. This is on the order of 4 orders of
magnitude faster when there are large numbers of permutations to
consider. There are further optimizations possible (doing a smarter
search of permutations for example), but this won't make a difference
until the number of permutations is on the order of 30,000.
"""
self.logger.debug("Performing fast ordering")
starttime = time.time()
self.logger.debug("Performing initial ewald sum...")
ewaldmatrix = EwaldSummation(structure).total_energy_matrix
self.logger.debug("Ewald sum took {} seconds."
.format(time.time() - starttime))
starttime = time.time()
m_list = []
for indices, num in num_remove_dict.items():
m_list.append([0, num, list(indices), None])
self.logger.debug("Calling EwaldMinimizer...")
minimizer = EwaldMinimizer(ewaldmatrix, m_list, num_to_return,
PartialRemoveSitesTransformation.ALGO_FAST)
self.logger.debug("Minimizing Ewald took {} seconds."
.format(time.time() - starttime))
all_structures = []
lowest_energy = minimizer.output_lists[0][0]
num_atoms = sum(structure.composition.values())
for output in minimizer.output_lists:
s = structure.copy()
del_indices = []
for manipulation in output[1]:
if manipulation[1] is None:
del_indices.append(manipulation[0])
else:
s.replace(manipulation[0], manipulation[1])
s.remove_sites(del_indices)
struct = s.get_sorted_structure()
all_structures.append(
{"energy": output[0],
"energy_above_minimum": (output[0] - lowest_energy)
/ num_atoms,
"structure": struct})
return all_structures | 0.000921 |
def t_HEXCONSTANT(self, t):
r'0x[0-9A-Fa-f]+'
t.value = int(t.value, 16)
t.type = 'INTCONSTANT'
return t | 0.014706 |
def kill_window(self):
"""Kill the current :class:`Window` object. ``$ tmux kill-window``."""
proc = self.cmd(
'kill-window',
# '-t:%s' % self.id
'-t%s:%s' % (self.get('session_id'), self.index),
)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
self.server._update_windows() | 0.005348 |
def get_time(self):
"""Time of the TIFF file
Currently, only the file modification time is supported.
Note that the modification time of the TIFF file is
dependent on the file system and may have temporal
resolution as low as 3 seconds.
"""
if isinstance(self.path, pathlib.Path):
thetime = self.path.stat().st_mtime
else:
thetime = np.nan
return thetime | 0.004435 |
def idctii(x, axes=None):
"""
Compute a multi-dimensional inverse DCT-II over specified array axes.
This function is implemented by calling the one-dimensional inverse
DCT-II :func:`scipy.fftpack.idct` with normalization mode 'ortho'
for each of the specified axes.
Parameters
----------
x : array_like
Input array
axes : sequence of ints, optional (default None)
Axes over which to compute the inverse DCT-II.
Returns
-------
y : ndarray
Inverse DCT-II of input array
"""
if axes is None:
axes = list(range(x.ndim))
for ax in axes[::-1]:
x = fftpack.idct(x, type=2, axis=ax, norm='ortho')
return x | 0.001431 |
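# A small round-trip check for idctii above, assuming the function and the
# scipy.fftpack import are in scope: applying the forward DCT-II (norm='ortho')
# along every axis and then idctii should recover the input.
import numpy as np

x = np.random.rand(4, 6)
y = x.copy()
for ax in range(y.ndim):
    y = fftpack.dct(y, type=2, axis=ax, norm='ortho')
assert np.allclose(idctii(y), x)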
def operation_list(uploader):
"""List file on target"""
files = uploader.file_list()
for f in files:
log.info("{file:30s} {size}".format(file=f[0], size=f[1])) | 0.005587 |
def deaccent(text):
"""
Remove accentuation from the given string.
"""
norm = unicodedata.normalize("NFD", text)
result = "".join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result) | 0.003968 |
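# Usage sketch for deaccent above, assuming the function and the unicodedata
# import are in scope:
print(deaccent("café naïve Šéf"))   # -> "cafe naive Sef"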
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
"""Adds a queueable item to a Sonos' playlist.
Args:
queueable_item (DidlObject): the item to add to the Sonos' playlist
sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
which the item should be added
"""
# Get the update_id for the playlist
response, _ = self.music_library._music_lib_search(
sonos_playlist.item_id, 0, 1)
update_id = response['UpdateID']
# Form the metadata for queueable_item
metadata = to_didl_string(queueable_item)
# Make the request
self.avTransport.AddURIToSavedQueue([
('InstanceID', 0),
('UpdateID', update_id),
('ObjectID', sonos_playlist.item_id),
('EnqueuedURI', queueable_item.resources[0].uri),
('EnqueuedURIMetaData', metadata),
# 2 ** 32 - 1 = 4294967295, this field has always this value. Most
# likely, playlist positions are represented as a 32 bit uint and
# this is therefore the largest index possible. Asking to add at
# this index therefore probably amounts to adding it "at the end"
('AddAtIndex', 4294967295)
]) | 0.001536 |
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache) | 0.004525 |
def convertLatLngToPixelXY(self, lat, lng, level):
'''
returns the x and y values of the pixel corresponding to a latitude
and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
lat = self.clipValue(lat, self.min_lat, self.max_lat)
lng = self.clipValue(lng, self.min_lng, self.max_lng)
x = (lng + 180) / 360
sinlat = math.sin(lat * math.pi / 180)
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1))
pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1))
return (pixelX, pixelY) | 0.002928 |
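# A self-contained sketch of the same Web-Mercator pixel math as
# convertLatLngToPixelXY above. The 256-pixel tile size and the +/-85.05112878
# latitude clip are assumptions here (the snippet delegates those details to
# getMapDimensionsByZoomLevel and clipValue):
import math

def latlng_to_pixel_xy(lat, lng, level, tile_size=256):
    map_size = tile_size << level
    lat = min(max(lat, -85.05112878), 85.05112878)
    lng = min(max(lng, -180.0), 180.0)
    x = (lng + 180) / 360
    sin_lat = math.sin(lat * math.pi / 180)
    y = 0.5 - math.log((1 + sin_lat) / (1 - sin_lat)) / (4 * math.pi)
    pixel_x = int(min(max(x * map_size + 0.5, 0), map_size - 1))
    pixel_y = int(min(max(y * map_size + 0.5, 0), map_size - 1))
    return pixel_x, pixel_y

print(latlng_to_pixel_xy(0.0, 0.0, 1))   # (256, 256), the centre of a 512x512 level-1 map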
def get(self, key, fallback=None):
"""
look up global config values from alot's config
:param key: key to look up
:type key: str
:param fallback: fallback returned if key is not present
:type fallback: str
:returns: config value with type as specified in the spec-file
"""
value = None
if key in self._config:
value = self._config[key]
if isinstance(value, Section):
value = None
if value is None:
value = fallback
return value | 0.003472 |
def _element_append_path(
start_element, # type: ET.Element
element_names # type: Iterable[Text]
):
# type: (...) -> ET.Element
"""
Append the list of element names as a path to the provided start element.
:return: The final element along the path.
"""
end_element = start_element
for element_name in element_names:
new_element = ET.Element(element_name)
end_element.append(new_element)
end_element = new_element
return end_element | 0.001972 |
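# Usage sketch for _element_append_path above, assuming the function and its
# xml.etree.ElementTree import (as ET) are in scope:
import xml.etree.ElementTree as ET

root = ET.Element('root')
leaf = _element_append_path(root, ['config', 'network', 'hostname'])
leaf.text = 'example'
print(ET.tostring(root).decode())
# <root><config><network><hostname>example</hostname></network></config></root>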
def setup_xrates(base, rates):
"""
If using the Python money package, this will set up the xrates exchange
rate data.
:param base:
The string currency code to use as the base
:param rates:
A dict with keys that are string currency codes and values that are
a Decimal of the exchange rate for that currency.
"""
xrates.install('money.exchange.SimpleBackend')
xrates.base = base
for code, value in rates.items():
xrates.setrate(code, value) | 0.001965 |
def is_running(self):
"""
Return true if the node is running
"""
self.__update_status()
return self.status == Status.UP or self.status == Status.DECOMMISSIONED | 0.01005 |
def _display_progress(data, stream):
"""
expecting the following data scheme:
{
u'status': u'Pushing',
u'progressDetail': {
u'current': 655337,
u'start': 1413994898,
u'total': 20412416
},
u'id': u'51783549ce98',
u'progress': u'[=> ] 655.3 kB/20.41 MB 30s'
}
{
u'status': u'Buffering to disk',
u'progressDetail': {
u'current': 13369344,
u'start': 1413994898
},
u'id': u'51783549ce98',
u'progress': u'13.37 MB'
}
"""
if type(data) is not dict:
raise TypeError("data should be of type dict. the following was passed: {0}".format(data))
stream.write("\r%s %s" % (data['status'], data['progress']))
if 'Pushing' in data['status']:
if data['progressDetail']['current'] == data['progressDetail'].get('total'):
stream.write("\n")
stream.flush() | 0.003287 |
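# Usage sketch for _display_progress above, assuming the function is in scope;
# the event dict mirrors the scheme shown in the docstring and the stream can
# be any file-like object:
import io

event = {
    'status': 'Pushing',
    'progressDetail': {'current': 655337, 'start': 1413994898, 'total': 20412416},
    'id': '51783549ce98',
    'progress': '[=>   ] 655.3 kB/20.41 MB 30s',
}
buf = io.StringIO()
_display_progress(event, buf)
print(repr(buf.getvalue()))   # '\rPushing [=>   ] 655.3 kB/20.41 MB 30s'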
def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType:
"""
Return SigningKey instance from Duniter WIF or EWIF file
:param path: Path to WIF or EWIF file
:param password: Password needed for EWIF file
"""
with open(path, 'r') as fh:
wif_content = fh.read()
# check data field
regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
match = search(regex, wif_content)
if not match:
raise Exception('Error: Bad format WIF or EWIF v1 file')
# capture hexa wif key
wif_hex = match.groups()[0]
return SigningKey.from_wif_or_ewif_hex(wif_hex, password) | 0.004202 |
def defBoroCnst(self,BoroCnstArt):
'''
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self. Uses the artificial and natural borrowing constraints.
Parameters
----------
BoroCnstArt : float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
Returns
-------
none
'''
# Calculate the minimum allowable value of money resources in this period
self.BoroCnstNat = (self.solution_next.mNrmMin - self.TranShkMinNext)*\
(self.PermGroFac*self.PermShkMinNext)/self.Rfree
# Note: need to be sure to handle BoroCnstArt==None appropriately.
# In Py2, this would evaluate to 5.0: np.max([None, 5.0]).
# However in Py3, this raises a TypeError. Thus here we need to directly
# address the situation in which BoroCnstArt == None:
if BoroCnstArt is None:
self.mNrmMinNow = self.BoroCnstNat
else:
self.mNrmMinNow = np.max([self.BoroCnstNat,BoroCnstArt])
if self.BoroCnstNat < self.mNrmMinNow:
self.MPCmaxEff = 1.0 # If actually constrained, MPC near limit is 1
else:
self.MPCmaxEff = self.MPCmaxNow
# Define the borrowing constraint (limiting consumption function)
self.cFuncNowCnst = LinearInterp(np.array([self.mNrmMinNow, self.mNrmMinNow+1]),
np.array([0.0, 1.0])) | 0.006989 |
def _parse_response_body_from_xml_node(node, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
return_obj = return_type()
_MinidomXmlToObject._fill_data_to_return_object(node, return_obj)
return return_obj | 0.006897 |
def onStart(self, event):
"""
Display the environment of a started container
"""
c = event.container
print '+' * 5, 'started:', c
kv = lambda s: s.split('=', 1)
env = {k: v for (k, v) in (kv(s) for s in c.attrs['Config']['Env'])}
print env | 0.009901 |
def invalidate_cache(self, klass, extra=None, **kwargs):
"""
Invalidate a cache for a specific class.
This will loop through all registered groups that have registered
the given model class and call their invalidate_cache method.
All keyword arguments will be directly passed through to the
group's invalidate_cache method, with the exception of **extra**
as noted below.
:param klass: The model class that needs some invalidation.
:param extra: A dictionary where the key corresponds to the name \
of a group where this model is registered and a value that is a \
list that will be passed as the extra keyword argument when \
calling invalidate_cache on that group. In this way you can \
specify specific extra values to invalidate only for specific \
groups.
"""
extra = extra or kwargs.pop('extra', {})
for group in self._registry.values():
if klass in group.models:
e = extra.get(group.key)
group.invalidate_cache(klass, extra=e, **kwargs) | 0.001775 |
def document_type(self, key, value):
"""Populate the ``document_type`` key.
Also populates the ``_collections``, ``citeable``, ``core``, ``deleted``,
``refereed``, ``publication_type``, and ``withdrawn`` keys through side
effects.
"""
schema = load_schema('hep')
publication_type_schema = schema['properties']['publication_type']
valid_publication_types = publication_type_schema['items']['enum']
document_type = self.get('document_type', [])
publication_type = self.get('publication_type', [])
a_values = force_list(value.get('a'))
for a_value in a_values:
normalized_a_value = a_value.strip().lower()
if normalized_a_value == 'arxiv':
continue # XXX: ignored.
elif normalized_a_value == 'citeable':
self['citeable'] = True
elif normalized_a_value == 'core':
self['core'] = True
elif normalized_a_value == 'noncore':
self['core'] = False
elif normalized_a_value == 'published':
self['refereed'] = True
elif normalized_a_value == 'withdrawn':
self['withdrawn'] = True
elif normalized_a_value == 'deleted':
self['deleted'] = True
elif normalized_a_value in COLLECTIONS_MAP:
self.setdefault('_collections', []).append(COLLECTIONS_MAP[normalized_a_value])
elif normalized_a_value in DOCUMENT_TYPE_MAP:
document_type.append(DOCUMENT_TYPE_MAP[normalized_a_value])
elif normalized_a_value in valid_publication_types:
publication_type.append(normalized_a_value)
c_value = force_single_element(value.get('c', ''))
normalized_c_value = c_value.strip().lower()
if normalized_c_value == 'deleted':
self['deleted'] = True
self['publication_type'] = publication_type
return document_type | 0.001072 |
def __serializedDot(self):
"""
DOT format:
digraph graphname {
a -> b [label=instanceOf];
b -> d [label=isA];
}
"""
temp = ""
for x,y,z in self.rdflib_graph.triples((None, None, None)):
temp += """"%s" -> "%s" [label="%s"];\n""" % (self.namespace_manager.normalizeUri(x), self.namespace_manager.normalizeUri(z), self.namespace_manager.normalizeUri(y))
temp = "digraph graphname {\n%s}" % temp
return temp | 0.046083 |
def parse_headers(content_disposition, location=None, relaxed=False):
"""Build a ContentDisposition from header values.
"""
LOGGER.debug(
'Content-Disposition %r, Location %r', content_disposition, location)
if content_disposition is None:
return ContentDisposition(location=location)
# Both alternatives seem valid.
if False:
# Require content_disposition to be ascii bytes (0-127),
# or characters in the ascii range
content_disposition = ensure_charset(content_disposition, 'ascii')
else:
# We allow non-ascii here (it will only be parsed inside of
# qdtext, and rejected by the grammar if it appears in
# other places), although parsing it can be ambiguous.
# Parsing it ensures that a non-ambiguous filename* value
# won't get dismissed because of an unrelated ambiguity
# in the filename parameter. But it does mean we occasionally
# give less-than-certain values for some legacy senders.
content_disposition = ensure_charset(content_disposition, 'iso-8859-1')
# Check the caller already did LWS-folding (normally done
# when separating header names and values; RFC 2616 section 2.2
# says it should be done before interpretation at any rate).
# Hopefully space still means what it should in iso-8859-1.
# This check is a bit stronger than LWS folding; it will
# remove CR and LF even if they aren't part of a CRLF.
# However http doesn't allow isolated CR and LF in headers outside
# of LWS.
if relaxed:
# Relaxed has two effects (so far):
# the grammar allows a final ';' in the header;
# we do LWS-folding, and possibly normalise other broken
# whitespace, instead of rejecting non-lws-safe text.
# XXX Would prefer to accept only the quoted whitespace
# case, rather than normalising everything.
content_disposition = normalize_ws(content_disposition)
parser = content_disposition_value_relaxed
else:
# Turns out this is occasionally broken: two spaces inside
# a quoted_string's qdtext. Firefox and Chrome save the two spaces.
if not is_lws_safe(content_disposition):
raise ValueError(
content_disposition, 'Contains nonstandard whitespace')
parser = content_disposition_value
try:
parsed = parser.parse(content_disposition)
except FullFirstMatchException:
return ContentDisposition(location=location)
return ContentDisposition(
disposition=parsed[0], assocs=parsed[1:], location=location) | 0.000379 |
def _preprocess_successor(self, state, add_guard=True): #pylint:disable=unused-argument
"""
Preprocesses the successor state.
:param state: the successor state
"""
# Next, simplify what needs to be simplified
if o.SIMPLIFY_EXIT_STATE in state.options:
state.solver.simplify()
if o.SIMPLIFY_EXIT_GUARD in state.options:
state.scratch.guard = state.solver.simplify(state.scratch.guard)
if o.SIMPLIFY_EXIT_TARGET in state.options:
state.scratch.target = state.solver.simplify(state.scratch.target)
# unwrap stuff from SimActionObjects
state.scratch.target = _raw_ast(state.scratch.target)
state.scratch.guard = _raw_ast(state.scratch.guard)
# apply the guard constraint and new program counter to the state
if add_guard:
state.add_constraints(state.scratch.guard)
# trigger inspect breakpoints here since this statement technically shows up in the IRSB as the "next"
state.regs.ip = state.scratch.target
# For architectures with no stack pointer, we can't manage a callstack. This has the side effect of breaking
# SimProcedures that call out to binary code self.call.
if self.initial_state.arch.sp_offset is not None and not isinstance(state.arch, ArchSoot):
self._manage_callstack(state)
if len(self.successors) != 0:
# This is a fork!
state._inspect('fork', BP_AFTER)
# clean up the state
state.options.discard(o.AST_DEPS)
state.options.discard(o.AUTO_REFS) | 0.004926 |
def _determine_stream_track(self,nTrackChunks):
"""Determine the track of the stream in real space"""
#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
if nTrackChunks is None:
#default is floor(self._deltaAngleTrack/0.15)+1
self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
else:
self._nTrackChunks= nTrackChunks
if self._nTrackChunks < 4: self._nTrackChunks= 4
if not hasattr(self,'nInterpolatedTrackChunks'):
self.nInterpolatedTrackChunks= 1001
dt= self._deltaAngleTrack\
/self._progenitor_Omega_along_dOmega
self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
if self._useTM:
return self._determine_stream_track_TM()
#Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
0.) #angle = 0
auxiliaryTrack= Orbit(prog_stream_offset[3])
if dt < 0.:
self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
#Flip velocities before integrating
auxiliaryTrack= auxiliaryTrack.flip()
auxiliaryTrack.integrate(self._trackts,self._pot)
if dt < 0.:
#Flip velocities again
auxiliaryTrack._orb.orbit[:,1]= -auxiliaryTrack._orb.orbit[:,1]
auxiliaryTrack._orb.orbit[:,2]= -auxiliaryTrack._orb.orbit[:,2]
auxiliaryTrack._orb.orbit[:,4]= -auxiliaryTrack._orb.orbit[:,4]
#Calculate the actions, frequencies, and angle for this auxiliary orbit
acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),
use_physical=False)
auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3\
)
auxiliary_Omega_along_dOmega= \
numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
allAcfsTrack= numpy.empty((self._nTrackChunks,9))
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
auxiliaryTrack,
self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Repeat the track calculation using the previous track, to get closer to it
for nn in range(self.nTrackIterations):
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
Orbit(ObsTrack[ii,:]),
0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x:self.meanOmega(x,use_physical=False),
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Store the track
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._allAcfsTrack= allAcfsTrack
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
self._calc_ObsTrackXY()
return None | 0.02026 |
def add_sub_directory(self, key, path):
"""Adds a sub-directory to the results directory.
Parameters
----------
key: str
A look-up key for the directory path.
path: str
The relative path from the root of the results directory to the sub-directory.
Returns
-------
str:
The absolute path to the sub-directory.
"""
sub_dir_path = os.path.join(self.results_root, path)
os.makedirs(sub_dir_path, exist_ok=True)
self._directories[key] = sub_dir_path
return sub_dir_path | 0.00495 |
def _get_interpretation_function(interpretation, dtype):
"""
Retrieves the interpretation function used.
"""
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
global _interpretations
if not hasattr(_interpretations, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
return getattr(_interpretations, name) | 0.004237 |
def dtpool(name):
"""
Return the data about a kernel pool variable.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dtpool_c.html
:param name: Name of the variable whose value is to be returned.
:type name: str
:return:
Number of values returned for name,
Type of the variable "C", "N", or "X".
:rtype: tuple
"""
name = stypes.stringToCharP(name)
found = ctypes.c_int()
n = ctypes.c_int()
typeout = ctypes.c_char()
libspice.dtpool_c(name, ctypes.byref(found), ctypes.byref(n),
ctypes.byref(typeout))
return n.value, stypes.toPythonString(typeout.value), bool(found.value) | 0.001464 |
def encode_multipart(data, files):
"""Encode multipart.
:arg dict data: Data to be encoded
:arg dict files: Files to be encoded
:returns: Encoded binary string
:raises: :class:`UrlfetchException`
"""
body = BytesIO()
boundary = choose_boundary()
part_boundary = b('--%s\r\n' % boundary)
writer = codecs.lookup('utf-8')[3]
if isinstance(data, dict):
for name, values in data.items():
if not isinstance(values, (list, tuple, set)):
# behave like urllib.urlencode(dict, 1)
values = (values, )
for value in values:
body.write(part_boundary)
writer(body).write('Content-Disposition: form-data; '
'name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(value, int):
value = str(value)
if py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
for fieldname, f in files.items():
if isinstance(f, tuple):
filename, f = f
elif hasattr(f, 'name'):
filename = basename(f.name)
else:
filename = None
raise UrlfetchException("file must has filename")
if hasattr(f, 'read'):
value = f.read()
elif isinstance(f, basestring):
value = f
else:
value = str(f)
body.write(part_boundary)
if filename:
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b'Content-Type: application/octet-stream\r\n\r\n')
else:
writer(body).write('Content-Disposition: form-data; name="%s"'
'\r\n' % fieldname)
body.write(b'Content-Type: text/plain\r\n\r\n')
if py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
body.write(b('--' + boundary + '--\r\n'))
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body.getvalue() | 0.000424 |
def _parse_topic_table(self, xml, tds='title,created,comment,group', selector='//table[@class="olt"]//tr'):
"""
解析话题列表
:internal
:param xml: 页面XML
:param tds: 每列的含义,可以是title, created, comment, group, updated, author, time, rec
:param selector: 表在页面中的位置
:return:
"""
xml_results = xml.xpath(selector)
results = []
tds = tds.split(',')
for item in xml_results:
try:
result = {}
index = 0
for td in tds:
index += 1
if td == 'title':
xml_title = item.xpath('.//td[position()=%s]/a' % index)[0]
url = xml_title.get('href')
tid = int(slash_right(url))
title = xml_title.text
result.update({'id': tid, 'url': url, 'title': title})
elif td == 'created':
xml_created = item.xpath('.//td[position()=%s]/a' % index) \
or item.xpath('.//td[position()=%s]' % index)
created_at = xml_created[0].get('title')
result['created_at'] = created_at
elif td == 'comment':
xml_comment = item.xpath('.//td[position()=%s]/span' % index) \
or item.xpath('.//td[position()=%s]' % index)
comment_count = int(re.match(r'\d+', xml_comment[0].text).group())
result['comment_count'] = comment_count
elif td == 'group':
xml_group = item.xpath('.//td[position()=%s]/a' % index)[0]
group_url = xml_group.get('href')
group_alias = slash_right(group_url)
group_name = xml_group.text
result.update({'group_alias': group_alias, 'group_url': group_url, 'group_name': group_name})
elif td == 'author':
xml_author = item.xpath('.//td[position()=%s]/a' % index)[0]
author_url = xml_author.get('href')
author_alias = slash_right(author_url)
author_nickname = xml_author.text
result.update({
'author_url': author_url,
'author_alias': author_alias,
'author_nickname': author_nickname,
})
elif td == 'updated':
result['updated_at'] = item.xpath('.//td[position()=%s]/text()' % index)[0]
elif td == 'time':
result['time'] = item.xpath('.//td[position()=%s]/text()' % index)[0]
elif td == 'rec':
xml_rec = item.xpath('.//td[position()=%s]//a[@class="lnk-remove"]/@href' % (index - 1))[0]
result['rec_id'] = re.search(r'rec_id=(\d+)', xml_rec).groups()[0]
results.append(result)
except Exception as e:
self.api.api.logger.exception('parse topic table exception: %s' % e)
return results | 0.006329 |
def trigger_all_change_callbacks(self):
"""Trigger all callbacks that were set with on_change()."""
return [
ret
for key in DatastoreLegacy.store[self.domain].keys()
for ret in self.trigger_change_callbacks(key)
] | 0.007326 |
def exit(self):
"""Stop the simple WSGI server running the appliation."""
if self._server is not None:
self._server.shutdown()
self._server.server_close()
self._server = None | 0.00885 |
def set_encryption_passphrases(self, encryption_passphrases):
'''Set encryption passphrases'''
self.encryption_passphrases = self._update_dict(encryption_passphrases,
{}, replace_data=True) | 0.007663 |
def set_published_date(self, published_date=None):
"""Sets the published date.
:param published_date: the new published date
:type published_date: ``osid.calendaring.DateTime``
:raise: ``InvalidArgument`` -- ``published_date`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``published_date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if published_date is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['published_date'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(published_date, metadata, array=False):
self._my_map['publishedDate'] = published_date # This is probably wrong
else:
raise InvalidArgument() | 0.003348 |
def viewTs(ts):
"""
View the contents of one time series entry in a nicely formatted way
| Example
| 1. D = lipd.readLipd()
| 2. ts = lipd.extractTs(D)
| 3. viewTs(ts[0])
:param dict ts: One time series entry
:return none:
"""
_ts = ts
if isinstance(ts, list):
_ts = ts[0]
print("It looks like you input a full time series. It's best to view one entry at a time.\n"
"I'll show you the first entry...")
_tmp_sort = OrderedDict()
_tmp_sort["ROOT"] = {}
_tmp_sort["PUBLICATION"] = {}
_tmp_sort["GEO"] = {}
_tmp_sort["OTHERS"] = {}
_tmp_sort["DATA"] = {}
# Organize the data by section
for k,v in _ts.items():
if not any(i == k for i in ["paleoData", "chronData", "mode", "@context"]):
if k in ["archiveType", "dataSetName", "googleSpreadSheetKey", "metadataMD5", "tagMD5", "googleMetadataWorksheet", "lipdVersion"]:
_tmp_sort["ROOT"][k] = v
elif "pub" in k:
_tmp_sort["PUBLICATION"][k] = v
elif "geo" in k:
_tmp_sort["GEO"][k] = v
elif "paleoData_" in k or "chronData_" in k:
if isinstance(v, list) and len(v) > 2:
_tmp_sort["DATA"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2])
else:
_tmp_sort["DATA"][k] = v
else:
if isinstance(v, list) and len(v) > 2:
_tmp_sort["OTHERS"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2])
else:
_tmp_sort["OTHERS"][k] = v
# Start printing the data to console
for k1, v1 in _tmp_sort.items():
print("\n{}\n===============".format(k1))
for k2, v2 in v1.items():
print("{} : {}".format(k2, v2))
return | 0.003792 |
def get_registered(option_hooks=None, event_hooks=None,
command_hooks=None, root_access=None,
task_active=True):
""" Returns a generator of registered plugins matching filters.
`option_hooks`
Boolean to include or exclude plugins using option hooks.
`event_hooks`
Boolean to include or exclude task event plugins.
`command_hooks`
Boolean to include or exclude command plugins.
`root_access`
Boolean to include or exclude root plugins.
`task_active`
Set to ``False`` to not filter by task-based plugins.
Returns list of ``Plugin`` instances.
"""
plugins = []
for _, item in _registered:
plugin, type_info = item
# filter out any task-specific plugins
if task_active:
if type_info.get('disabled'):
continue
else:
if plugin.options or plugin.task_only:
continue
if not option_hooks is None:
if option_hooks != bool(type_info.get('option')):
continue
if not event_hooks is None:
if event_hooks != bool(type_info.get('event')):
continue
if not command_hooks is None:
if command_hooks != bool(type_info.get('command')):
continue
if not root_access is None:
if root_access != plugin.needs_root:
continue
plugins.append(plugin)
return plugins | 0.003215 |
def ortholog(args):
"""
%prog ortholog species_a species_b
Run a sensitive pipeline to find orthologs between two species a and b.
The pipeline runs LAST and generate .lifted.anchors.
`--full` mode would assume 1-to-1 quota synteny blocks as the backbone of
such predictions. Extra orthologs will be recruited from reciprocal best
match (RBH).
"""
from jcvi.apps.align import last as last_main
from jcvi.compara.blastfilter import main as blastfilter_main
from jcvi.compara.quota import main as quota_main
from jcvi.compara.synteny import scan, mcscan, liftover
from jcvi.formats.blast import cscore, filter
p = OptionParser(ortholog.__doc__)
p.add_option("--dbtype", default="nucl",
choices=("nucl", "prot"),
help="Molecule type of subject database")
p.add_option("--full", default=False, action="store_true",
help="Run in full mode, including blocks and RBH")
p.add_option("--cscore", default=0.7, type="float",
help="C-score cutoff [default: %default]")
p.add_option("--dist", default=20, type="int",
help="Extent of flanking regions to search")
p.add_option("--quota", help="Quota align parameter")
p.add_option("--nostdpf", default=False, action="store_true",
help="Do not standardize contig names")
p.add_option("--no_strip_names", default=False, action="store_true",
help="Do not strip alternative splicing "
"(e.g. At5g06540.1 -> At5g06540)")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
a, b = args
dbtype = opts.dbtype
suffix = ".cds" if dbtype == "nucl" else ".pep"
abed, afasta = a + ".bed", a + suffix
bbed, bfasta = b + ".bed", b + suffix
ccscore = opts.cscore
quota = opts.quota
dist = "--dist={0}".format(opts.dist)
aprefix = afasta.split(".")[0]
bprefix = bfasta.split(".")[0]
pprefix = ".".join((aprefix, bprefix))
qprefix = ".".join((bprefix, aprefix))
last = pprefix + ".last"
if need_update((afasta, bfasta), last):
last_main([bfasta, afasta], dbtype)
if a == b:
lastself = last + ".P98L0.inverse"
if need_update(last, lastself):
filter([last, "--hitlen=0", "--pctid=98", "--inverse", "--noself"])
last = lastself
filtered_last = last + ".filtered"
if need_update(last, filtered_last):
if opts.no_strip_names:
blastfilter_main([last, "--cscore={0}".format(ccscore), "--no_strip_names"])
else:
blastfilter_main([last, "--cscore={0}".format(ccscore)])
anchors = pprefix + ".anchors"
lifted_anchors = pprefix + ".lifted.anchors"
pdf = pprefix + ".pdf"
if not opts.full:
if need_update(filtered_last, lifted_anchors):
if opts.no_strip_names:
scan([filtered_last, anchors, dist,
"--liftover={0}".format(last), "--no_strip_names"])
else:
scan([filtered_last, anchors, dist,
"--liftover={0}".format(last)])
if quota:
quota_main([lifted_anchors,
"--quota={0}".format(quota), "--screen"])
if need_update(anchors, pdf):
from jcvi.graphics.dotplot import dotplot_main
dargs = [anchors]
if opts.nostdpf:
dargs += ["--nostdpf", "--skipempty"]
dotplot_main(dargs)
return
if need_update(filtered_last, anchors):
if opts.no_strip_names:
scan([filtered_last, anchors, dist, "--no_strip_names"])
else:
scan([filtered_last, anchors, dist])
ooanchors = pprefix + ".1x1.anchors"
if need_update(anchors, ooanchors):
quota_main([anchors, "--quota=1:1", "--screen"])
lifted_anchors = pprefix + ".1x1.lifted.anchors"
if need_update((last, ooanchors), lifted_anchors):
if opts.no_strip_names:
liftover([last, ooanchors, dist, "--no_strip_names"])
else:
liftover([last, ooanchors, dist])
pblocks = pprefix + ".1x1.blocks"
qblocks = qprefix + ".1x1.blocks"
if need_update(lifted_anchors, [pblocks, qblocks]):
mcscan([abed, lifted_anchors, "--iter=1", "-o", pblocks])
mcscan([bbed, lifted_anchors, "--iter=1", "-o", qblocks])
rbh = pprefix + ".rbh"
if need_update(last, rbh):
cscore([last, "-o", rbh])
portho = pprefix + ".ortholog"
qortho = qprefix + ".ortholog"
if need_update([pblocks, qblocks, rbh], [portho, qortho]):
make_ortholog(pblocks, rbh, portho)
make_ortholog(qblocks, rbh, qortho) | 0.001476 |
def openOrders(self) -> List[Order]:
"""
List of all open orders.
"""
return [trade.order for trade in self.wrapper.trades.values()
if trade.orderStatus.status not in OrderStatus.DoneStates] | 0.008403 |
def cudnnSoftmaxForward(handle, algorithm, mode, alpha, srcDesc, srcData, beta, destDesc, destData):
""""
This routing computes the softmax function
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
algorithm : cudnnSoftmaxAlgorithm
Enumerant to specify the softmax algorithm.
mode : cudnnSoftmaxMode
Enumerant to specify the softmax mode.
alpha: float
Scaling factor with which every element of the input tensors is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the activation. Note that if beta is zero, the output
is not read and can contain any uninitialized data (including NaN numbers).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
else:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
status = _libcudnn.cudnnSoftmaxForward(handle, algorithm, mode, alphaRef,
srcDesc, srcData, betaRef,
destDesc, destData)
cudnnCheckStatus(status) | 0.004376 |
def wait_for_at_least(self, new_state):
"""
Wait for a state to be entered which is greater than or equal to
`new_state` and return.
"""
if not (self._state < new_state):
return
fut = asyncio.Future(loop=self.loop)
self._least_waiters.append((new_state, fut))
yield from fut | 0.005698 |
def generate_mavlink(directory, xml):
'''generate MVMavlink header and implementation'''
f = open(os.path.join(directory, "MVMavlink.h"), mode='w')
t.write(f,'''
//
// MVMavlink.h
// MAVLink communications protocol built from ${basename}.xml
//
// Created on ${parse_time} by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage.h"
${{message_definition_files:#import "MV${name_camel_case}Messages.h"
}}
@class MVMavlink;
@protocol MVMessage;
@protocol MVMavlinkDelegate <NSObject>
/*!
Method called on the delegate when a full message has been received. Note that this may be called multiple times when parseData: is called, if the data passed to parseData: contains multiple messages.
@param mavlink The MVMavlink object calling this method
@param message The id<MVMessage> class containing the parsed message
*/
- (void)mavlink:(MVMavlink *)mavlink didGetMessage:(id<MVMessage>)message;
/*!
Method called on the delegate when data should be sent.
@param mavlink The MVMavlink object calling this method
@param data NSData object containing the bytes to be sent
*/
- (BOOL)mavlink:(MVMavlink *)mavlink shouldWriteData:(NSData *)data;
@end
/*!
Class for parsing and sending instances of id<MVMessage>
@discussion MVMavlink receives a stream of bytes via the parseData: method and calls the delegate method mavlink:didGetMessage: each time a message is fully parsed. Users of MVMavlink can call parseData: anytime they get new data, even if that data does not contain a complete message.
*/
@interface MVMavlink : NSObject
@property (weak, nonatomic) id<MVMavlinkDelegate> delegate;
/*!
Parse byte data received from a MAVLink byte stream.
@param data NSData containing the received bytes
*/
- (void)parseData:(NSData *)data;
/*!
 Compile an MVMessage object into bytes and pass them to the delegate for sending.
@param message Object conforming to the MVMessage protocol that represents the data to be sent
@return YES if message sending was successful
*/
- (BOOL)sendMessage:(id<MVMessage>)message;
@end
''', xml)
f.close()
f = open(os.path.join(directory, "MVMavlink.m"), mode='w')
t.write(f,'''
//
// MVMavlink.m
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMavlink.h"
@implementation MVMavlink
- (void)parseData:(NSData *)data {
mavlink_message_t msg;
mavlink_status_t status;
char *bytes = (char *)[data bytes];
for (NSInteger i = 0; i < [data length]; ++i) {
if (mavlink_parse_char(MAVLINK_COMM_0, bytes[i], &msg, &status)) {
// Packet received
id<MVMessage> message = [MVMessage messageWithCMessage:msg];
[_delegate mavlink:self didGetMessage:message];
}
}
}
- (BOOL)sendMessage:(id<MVMessage>)message {
return [_delegate mavlink:self shouldWriteData:[message data]];
}
@end
''', xml)
f.close() | 0.002048 |
def list_exports(exports='/etc/exports'):
'''
List configured exports
CLI Example:
.. code-block:: bash
salt '*' nfs.list_exports
'''
ret = {}
with salt.utils.files.fopen(exports, 'r') as efl:
for line in salt.utils.stringutils.to_unicode(efl.read()).splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
# Handle the case where the same path is given twice
if not comps[0] in ret:
ret[comps[0]] = []
newshares = []
for perm in comps[1:]:
if perm.startswith('/'):
newshares.append(perm)
continue
permcomps = perm.split('(')
permcomps[1] = permcomps[1].replace(')', '')
hosts = permcomps[0]
if not isinstance(hosts, six.string_types):
# Lists, etc would silently mangle /etc/exports
raise TypeError('hosts argument must be a string')
options = permcomps[1].split(',')
ret[comps[0]].append({'hosts': hosts, 'options': options})
for share in newshares:
ret[share] = ret[comps[0]]
return ret | 0.000752 |
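For illustration, an /etc/exports line such as
/srv/media 10.0.0.0/24(rw,sync) *.example.com(ro)
(hypothetical share path and hosts) is returned as:

{
    '/srv/media': [
        {'hosts': '10.0.0.0/24', 'options': ['rw', 'sync']},
        {'hosts': '*.example.com', 'options': ['ro']},
    ]
}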
def get_config(self, budget):
"""
Function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration
"""
        # No observations available for this budget; sample from the prior
if len(self.kde_models.keys()) == 0:
return self.configspace.sample_configuration().get_dictionary()
# If we haven't seen anything with this budget, we sample from the kde trained on the highest budget
if budget not in self.kde_models.keys():
budget = sorted(self.kde_models.keys())[-1]
# TODO: This only works in continuous space and with gaussian kernels
kde = self.kde_models[budget]
idx = np.random.randint(0, len(self.kde_models[budget].data))
vector = [sps.truncnorm.rvs(-m/bw,(1-m)/bw, loc=m, scale=bw) for m,bw in zip(self.kde_models[budget].data[idx], kde.bw)]
if np.any(np.array(vector)>1) or np.any(np.array(vector)<0):
raise RuntimeError("truncated normal sampling problems!")
sample = ConfigSpace.Configuration(self.configspace, vector=vector)
return sample.get_dictionary(), {} | 0.029508 |
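The per-dimension draw above is a truncated normal centred on a previously observed value (normalised to [0, 1]) with the KDE bandwidth as its scale. The same step in isolation, with illustrative numbers:

import scipy.stats as sps

m, bw = 0.7, 0.15                # stored datum and kernel bandwidth, both assumed in [0, 1]
draw = sps.truncnorm.rvs(-m / bw, (1 - m) / bw, loc=m, scale=bw)
assert 0.0 <= draw <= 1.0        # the truncation keeps the sample inside the unit interval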
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object.
"""
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)),
package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
estr = 'A load, file_list, and download routine are required for '
raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
return | 0.002693 |
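An instrument module supplied through inst_module (or resolved by_name) only has to expose the three mandatory routines; everything else is optional. A hedged sketch of such a module — the argument lists are assumptions reduced to what this method actually checks, and real pysat instruments take more keywords:

# minimal_instrument.py
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Return the locally available files for this instrument (stub)."""
    return []

def load(fnames, tag=None, sat_id=None):
    """Load the data contained in fnames (stub)."""
    return None, None

def download(date_array, tag, sat_id, data_path=None):
    """Download remote data for the requested dates (stub)."""
    return

# Optional hooks and attributes picked up when present: default, init, clean,
# directory_format, multi_file_day, orbit_info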
def close(*args, **kwargs):
r"""Close last created figure, alias to ``plt.close()``."""
_, plt, _ = _import_plt()
plt.close(*args, **kwargs) | 0.006579 |
def replace(self, **kwargs):
""" Return a :class:`.Date` with one or more components replaced
with new values.
"""
return Date(kwargs.get("year", self.__year),
kwargs.get("month", self.__month),
kwargs.get("day", self.__day)) | 0.006734 |
def calculate_text_coords(rectangle_coords):
"""Calculate Canvas text coordinates based on rectangle coords"""
return (int(rectangle_coords[0] + (rectangle_coords[2] - rectangle_coords[0]) / 2),
int(rectangle_coords[1] + (rectangle_coords[3] - rectangle_coords[1]) / 2)) | 0.013245 |
def gdate(self):
"""Return the Gregorian date for the given Hebrew date object."""
if self._last_updated == "gdate":
return self._gdate
return conv.jdn_to_gdate(self._jdn) | 0.009662 |
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map") | 0.00463 |
def d2logpdf_dlink2_dvar(self, link_f, y, Y_metadata=None):
"""
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: derivative of log likelihood evaluated at points link(f) w.r.t variance parameter
:rtype: Nx1 array
"""
c = np.zeros_like(y)
if Y_metadata is not None and 'censored' in Y_metadata.keys():
c = Y_metadata['censored']
val = np.log(y) - link_f
val_scaled = val/np.sqrt(self.variance)
val_scaled2 = val/self.variance
a = (1 - stats.norm.cdf(val_scaled))
uncensored = (1-c)*( 1./(self.variance**2) )
censored = c*( val*np.exp(-3*(val**2)/(2*self.variance) )/ ((a**3)*np.sqrt(8*np.pi**3)*self.variance**(5/2.))
+ np.exp(-val**2/self.variance)/((a**2)*4*np.pi*self.variance**2)
- np.exp(-val**2/self.variance)*val**2 / ((a**2)*2*np.pi*self.variance**3)
+ np.exp(-val**2/self.variance)/ ( (a**2)*4*np.pi*self.variance**2)
- np.exp(-val**2/ (2*self.variance))*val / ( a*np.sqrt(2*np.pi)*2*self.variance**(5/2.))
- np.exp(-val**2/self.variance)*(val**2) / ((a**2)*4*np.pi*self.variance**3)
- np.exp(-val**2/ (2*self.variance))*val/ (a*np.sqrt(2*np.pi)*self.variance**(5/2.))
+ np.exp(-val**2/ (2*self.variance))*(val**3) / (a*np.sqrt(2*np.pi)*2*self.variance**(7/2.)) )
dlik_hess_dsigma = uncensored + censored
return dlik_hess_dsigma | 0.014363 |
def getSlicedArray(self, copy=True):
""" Slice the rti using a tuple of slices made from the values of the combo and spin boxes.
:param copy: If True (the default), a copy is made so that inspectors cannot
                accidentally modify the underlying data of the RTIs. You can set copy=False as a
                potential optimization, but only if you are absolutely sure that you don't modify
                the slicedArray in your inspector! Note that this function calls transpose,
which can still make a copy of the array for certain permutations.
:return: Numpy masked array with the same number of dimension as the number of
comboboxes (this can be zero!).
Returns None if no slice can be made (i.e. the RTI is not sliceable).
"""
#logger.debug("getSlicedArray() called")
if not self.rtiIsSliceable:
return None
# The dimensions that are selected in the combo boxes will be set to slice(None),
# the values from the spin boxes will be set as a single integer value
nDims = self.rti.nDims
sliceList = [slice(None)] * nDims
for spinBox in self._spinBoxes:
dimNr = spinBox.property("dim_nr")
sliceList[dimNr] = spinBox.value()
# Make the array slicer. It needs to be a tuple, a list of only integers will be
# interpreted as an index. With a tuple, array[(exp1, exp2, ..., expN)] is equivalent to
# array[exp1, exp2, ..., expN].
# See: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
logger.debug("Array slice list: {}".format(str(sliceList)))
slicedArray = self.rti[tuple(sliceList)]
# Make a copy to prevent inspectors from modifying the underlying array.
if copy:
slicedArray = ma.copy(slicedArray)
# If there are no comboboxes the sliceList will contain no Slices objects, only ints. Then
        # the resulting slicedArray will usually be a scalar (only structured fields may yield an
# array). We convert this scalar to a zero-dimensional Numpy array so that inspectors
# always get an array (having the same number of dimensions as the dimensionality of the
# inspector, i.e. the number of comboboxes).
if self.maxCombos == 0:
slicedArray = ma.MaskedArray(slicedArray)
# Post-condition type check
check_is_an_array(slicedArray, np.ndarray)
# Enforce the return type to be a masked array.
if not isinstance(slicedArray, ma.MaskedArray):
slicedArray = ma.MaskedArray(slicedArray)
# Add fake dimensions of length 1 so that result.ndim will equal the number of combo boxes
for dimNr in range(slicedArray.ndim, self.maxCombos):
#logger.debug("Adding fake dimension: {}".format(dimNr))
slicedArray = ma.expand_dims(slicedArray, dimNr)
# Post-condition dimension check
assert slicedArray.ndim == self.maxCombos, \
"Bug: getSlicedArray should return a {:d}D array, got: {}D" \
.format(self.maxCombos, slicedArray.ndim)
# Convert to ArrayWithMask class for working around issues with the numpy maskedarray
awm = ArrayWithMask.createFromMaskedArray(slicedArray)
del slicedArray
# Shuffle the dimensions to be in the order as specified by the combo boxes
comboDims = [self._comboBoxDimensionIndex(cb) for cb in self._comboBoxes]
permutations = np.argsort(comboDims)
logger.debug("slicedArray.shape: {}".format(awm.data.shape))
logger.debug("Transposing dimensions: {}".format(permutations))
awm = awm.transpose(permutations)
awm.checkIsConsistent()
return awm | 0.006283 |
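The central indexing trick — a list of slice(None) placeholders with the spin-box integers filled in, then converted to a tuple before indexing — can be shown on a bare NumPy array (standalone illustration, no RTI classes involved):

import numpy as np

data = np.arange(2 * 3 * 4).reshape(2, 3, 4)

# Combo boxes keep dimensions 0 and 2; a spin box fixes dimension 1 at index 1.
slice_list = [slice(None)] * data.ndim
slice_list[1] = 1

sliced = data[tuple(slice_list)]   # the tuple makes this a multi-dimensional index
assert sliced.shape == (2, 4)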
def bcftools_stats_genstats_headers(self):
""" Add key statistics to the General Stats table """
stats_headers = OrderedDict()
stats_headers['number_of_records'] = {
'title': 'Vars',
'description': 'Variations total',
'min': 0, 'format': '{:,.0f}',
}
stats_headers['variations_hom'] = {
'title': 'Hom',
'description': 'Variations homozygous',
'min': 0, 'format': '{:,.0f}',
}
stats_headers['variations_het'] = {
'title': 'Het',
'description': 'Variations heterozygous',
'min': 0, 'format': '{:,.0f}',
}
stats_headers['number_of_SNPs'] = {
'title': 'SNP',
'description': 'Variation SNPs',
'min': 0, 'format': '{:,.0f}',
}
stats_headers['number_of_indels'] = {
'title': 'Indel',
'description': 'Variation Insertions/Deletions',
'min': 0, 'format': '{:,.0f}',
}
stats_headers['tstv'] = {
'title': 'Ts/Tv',
'description': 'Variant SNP transition / transversion ratio',
'min': 0, 'format': '{:,.2f}',
}
stats_headers['number_of_MNPs'] = {
'title': 'MNP',
'description': 'Variation multinucleotide polymorphisms',
'min': 0, 'format': '{:,.0f}', "hidden": True,
}
return stats_headers | 0.001365 |
def col_frequencies(col, weights=None, gap_chars='-.'):
"""Frequencies of each residue type (totaling 1.0) in a single column."""
counts = col_counts(col, weights, gap_chars)
# Reduce to frequencies
scale = 1.0 / sum(counts.values())
return dict((aa, cnt * scale) for aa, cnt in counts.iteritems()) | 0.003145 |
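col_counts is defined elsewhere, but the intended behaviour can be sketched with collections.Counter (Python 3 phrasing, ignoring the optional weights); for the column "AAG-" the non-gap frequencies come out as A: 2/3, G: 1/3:

from collections import Counter

def col_frequencies_sketch(col, gap_chars='-.'):
    counts = Counter(aa for aa in col if aa not in gap_chars)
    scale = 1.0 / sum(counts.values())
    return {aa: cnt * scale for aa, cnt in counts.items()}

freqs = col_frequencies_sketch("AAG-")
assert abs(freqs['A'] - 2 / 3) < 1e-12 and abs(freqs['G'] - 1 / 3) < 1e-12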
def element_to_unicode(element):
"""Serialize an XML element into a unicode string.
This should work the same on Python2 and Python3 and with all
:etree:`ElementTree` implementations.
:Parameters:
- `element`: the XML element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
"""
if hasattr(ElementTree, 'tounicode'):
# pylint: disable=E1103
        return ElementTree.tounicode(element)
elif sys.version_info.major < 3:
return unicode(ElementTree.tostring(element))
else:
return ElementTree.tostring(element, encoding = "unicode") | 0.004808 |
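A quick usage sketch with a trivially built element (exact attribute ordering in the output can vary between ElementTree implementations):

from xml.etree import ElementTree

msg = ElementTree.Element("message", {"to": "romeo@example.net"})
msg.text = "art thou not Romeo?"
xml_text = element_to_unicode(msg)
assert xml_text.startswith("<message")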
def add_config(self, config):
"""
:param config:
:type config: dict
"""
self.pre_configure()
self.config = config
if not self.has_revision_file():
#: Create new revision file.
touch_file(self.revfile_path)
self.history.load(self.revfile_path)
self.archiver.target_path = self.dest_path
self.archiver.zip_path = self.tmp_file_path
self.state.state_path = os.path.join(
REVISION_HOME,
"clients",
self.key
)
self.state.prepare()
self.post_configure()
self.prepared = True | 0.003053 |
def windspeed(self, t):
"""Return the wind speed list at time `t`"""
ws = [0] * self.n
for i in range(self.n):
q = ceil(t / self.dt[i])
q_prev = 0 if q == 0 else q - 1
r = t % self.dt[i]
r = 0 if abs(r) < 1e-6 else r
if r == 0:
ws[i] = self.speed[i][q]
else:
t1 = self.time[i][q_prev]
s1 = self.speed[i][q_prev]
s2 = self.speed[i][q]
ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i]
return matrix(ws) | 0.003413 |
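Between two stored samples the method performs a plain linear interpolation. The same arithmetic for a single machine, with illustrative numbers:

from math import ceil

time = [0.0, 10.0, 20.0]       # sample times for one machine
speed = [5.0, 7.0, 6.0]        # wind speed at those times
dt = 10.0                      # sampling interval

t = 14.0
q = ceil(t / dt)               # sample at or after t   -> index 2
q_prev = q - 1                 # bracketing sample      -> index 1
ws = speed[q_prev] + (t - time[q_prev]) * (speed[q] - speed[q_prev]) / dt
assert abs(ws - 6.6) < 1e-12   # 7.0 + 0.4 * (6.0 - 7.0)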
def close(self):
"""
Close the file.
"""
if not self.closed:
self.closed = True
retval = self.f.close()
if self.base_mode != "r":
self.__size = self.fs.get_path_info(self.name)["size"]
return retval | 0.006803 |
def monte_carlo_csiszar_f_divergence(
f,
p_log_prob,
q,
num_draws,
use_reparametrization=None,
seed=None,
name=None):
"""Monte-Carlo approximation of the Csiszar f-Divergence.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar f-Divergence for Csiszar-function f is given by:
```none
D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ]
~= m**-1 sum_j^m f( p(x_j) / q(x_j) ),
where x_j ~iid q(X)
```
Tricks: Reparameterization and Score-Gradient
When q is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
`grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}`
and `s_i = f(x_i), x_i ~iid q(X)`.
However, if q is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of unreparameterized distributions. In
this circumstance using the Score-Gradient trick results in an unbiased
gradient, i.e.,
```none
grad[ E_q[f(X)] ]
= grad[ int dx q(x) f(x) ]
= int dx grad[ q(x) f(x) ]
= int dx [ q'(x) f(x) + q(x) f'(x) ]
= int dx q(x) [q'(x) / q(x) f(x) + f'(x) ]
= int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ]
= E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ]
```
Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is
usually preferable to set `use_reparametrization = True`.
Example Application:
The Csiszar f-Divergence is a useful framework for variational inference.
I.e., observe that,
```none
f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] )
<= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ]
:= D_f[p(x, Z), q(Z | x)]
```
The inequality follows from the fact that the "perspective" of `f`, i.e.,
  `(s, t) |-> t f(s / t)`, is convex in `(s, t)` when `s/t in domain(f)` and
`t` is a real. Since the above framework includes the popular Evidence Lower
BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework
"Evidence Divergence Bound Optimization" (EDBO).
Args:
f: Python `callable` representing a Csiszar-function in log-space, i.e.,
takes `p_log_prob(q_samples) - q.log_prob(q_samples)`.
p_log_prob: Python `callable` taking (a batch of) samples from `q` and
returning the natural-log of the probability under distribution `p`.
(In variational inference `p` is the joint distribution.)
q: `tf.Distribution`-like instance; must implement:
`reparameterization_type`, `sample(n, seed)`, and `log_prob(x)`.
(In variational inference `q` is the approximate posterior distribution.)
num_draws: Integer scalar number of draws used to approximate the
f-Divergence expectation.
use_reparametrization: Python `bool`. When `None` (the default),
automatically set to:
`q.reparameterization_type == tfd.FULLY_REPARAMETERIZED`.
When `True` uses the standard Monte-Carlo average. When `False` uses the
score-gradient trick. (See above for details.) When `False`, consider
using `csiszar_vimco`.
seed: Python `int` seed for `q.sample`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
monte_carlo_csiszar_f_divergence: `float`-like `Tensor` Monte Carlo
approximation of the Csiszar f-Divergence.
Raises:
ValueError: if `q` is not a reparameterized distribution and
`use_reparametrization = True`. A distribution `q` is said to be
"reparameterized" when its samples are generated by transforming the
samples of another distribution which does not depend on the
parameterization of `q`. This property ensures the gradient (with respect
to parameters) is valid.
TypeError: if `p_log_prob` is not a Python `callable`.
"""
reparameterization_types = tf.nest.flatten(q.reparameterization_type)
with tf.compat.v1.name_scope(name, "monte_carlo_csiszar_f_divergence",
[num_draws]):
if use_reparametrization is None:
use_reparametrization = all(
reparameterization_type == tfd.FULLY_REPARAMETERIZED
for reparameterization_type in reparameterization_types)
elif (use_reparametrization and
any(reparameterization_type != tfd.FULLY_REPARAMETERIZED
for reparameterization_type in reparameterization_types)):
# TODO(jvdillon): Consider only raising an exception if the gradient is
# requested.
raise ValueError(
"Distribution `q` must be reparameterized, i.e., a diffeomorphic "
"transformation of a parameterless distribution. (Otherwise this "
"function has a biased gradient.)")
if not callable(p_log_prob):
raise TypeError("`p_log_prob` must be a Python `callable` function.")
return monte_carlo.expectation(
f=lambda q_samples: f(p_log_prob(q_samples) - q.log_prob(q_samples)),
samples=q.sample(num_draws, seed=seed),
log_prob=q.log_prob, # Only used if use_reparametrization=False.
use_reparametrization=use_reparametrization) | 0.002293 |
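For the ELBO special case mentioned in the docstring, f(u) = -log(u) becomes f = lambda logu: -logu in this log-space API. A minimal usage sketch, assuming a TensorFlow Probability version compatible with the code above and its module-level tf/tfd aliases:

import tensorflow_probability as tfp
tfd = tfp.distributions

p = tfd.Normal(loc=0., scale=1.)    # stands in for the model distribution
q = tfd.Normal(loc=1., scale=2.)    # fully reparameterized surrogate posterior

# Monte-Carlo estimate of KL[q || p] (reverse KL, the negative-ELBO term).
divergence = monte_carlo_csiszar_f_divergence(
    f=lambda logu: -logu,           # Csiszar function of the reverse KL / ELBO
    p_log_prob=p.log_prob,
    q=q,
    num_draws=1000,
    seed=42)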
def _is_multiframe_diffusion_imaging(dicom_input):
"""
Use this function to detect if a dicom series is a philips multiframe dti dataset
    NOTE: We already assume this is a 4D dataset as input
"""
header = dicom_input[0]
if "PerFrameFunctionalGroupsSequence" not in header:
return False
# check if there is diffusion info in the frame
found_diffusion = False
diffusion_tag = Tag(0x0018, 0x9117)
for frame in header.PerFrameFunctionalGroupsSequence:
if diffusion_tag in frame:
found_diffusion = True
break
if not found_diffusion:
return False
return True | 0.003082 |
def pad_position_w(self, i):
"""
Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: ith number of pad in width direction (0-indexed)
:return:
"""
if i >= self.n_pads_w:
raise ModelError("pad index out-of-bounds")
return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2 | 0.007075 |
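Worked example of the formula: with width = 10, pad_width = 2 and n_pads_w = 3 (illustrative attribute values) the pad centres land at 1.0, 5.0 and 9.0, i.e. the outer pads sit flush with the edges and the rest are evenly spaced:

width, pad_width, n_pads_w = 10.0, 2.0, 3
positions = [(width - pad_width) / (n_pads_w - 1) * i + pad_width / 2
             for i in range(n_pads_w)]
assert positions == [1.0, 5.0, 9.0]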
def create_histogram(df):
""" create a mg line plot
Args:
df (pandas.DataFrame): data to plot
"""
fig = Figure("/mg/histogram/", "mg_histogram")
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
fig.graphics.animate_on_load()
# Make a histogram with 20 bins
return Histogram(df, fig, "value", 20, init_params={"Data": "Steps"}) | 0.00241 |
def record_participation(self, client, dt=None):
"""Record a user's participation in a test along with a given variation"""
if dt is None:
date = datetime.now()
else:
date = dt
experiment_key = self.experiment.name
pipe = self.redis.pipeline()
pipe.sadd(_key("p:{0}:years".format(experiment_key)), date.strftime('%Y'))
pipe.sadd(_key("p:{0}:months".format(experiment_key)), date.strftime('%Y-%m'))
pipe.sadd(_key("p:{0}:days".format(experiment_key)), date.strftime('%Y-%m-%d'))
pipe.execute()
keys = [
_key("p:{0}:_all:all".format(experiment_key)),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y'))),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m'))),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m-%d'))),
_key("p:{0}:{1}:all".format(experiment_key, self.name)),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y'))),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y-%m'))),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y-%m-%d'))),
]
msetbit(keys=keys, args=([self.experiment.sequential_id(client), 1] * len(keys))) | 0.008785 |
def set_my_info(self, message, contact_info=""):
"""set my contact info to ____: Set your emergency contact info."""
contacts = self.load("contact_info", {})
contacts[message.sender.handle] = {
"info": contact_info,
"name": message.sender.name,
}
self.save("contact_info", contacts)
self.say("Got it.", message=message) | 0.005115 |
def split_predicate(ex: Extraction) -> Extraction:
"""
Ensure single word predicate
by adding "before-predicate" and "after-predicate"
arguments.
"""
rel_toks = ex.toks[char_to_word_index(ex.rel.span[0], ex.sent) \
: char_to_word_index(ex.rel.span[1], ex.sent) + 1]
if not rel_toks:
return ex
verb_inds = [tok_ind for (tok_ind, tok)
in enumerate(rel_toks)
if tok.tag_.startswith('VB')]
last_verb_ind = verb_inds[-1] if verb_inds \
else (len(rel_toks) - 1)
rel_parts = [element_from_span([rel_toks[last_verb_ind]],
'V')]
before_verb = rel_toks[ : last_verb_ind]
after_verb = rel_toks[last_verb_ind + 1 : ]
if before_verb:
rel_parts.append(element_from_span(before_verb, "BV"))
if after_verb:
rel_parts.append(element_from_span(after_verb, "AV"))
return Extraction(ex.sent, ex.toks, ex.arg1, rel_parts, ex.args2, ex.confidence) | 0.007782 |
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Substitution structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = self.bulk_structure.copy()
defect_structure.make_supercell(supercell)
# consider modifying velocity property to make sure defect site is decorated
# consistently with bulk structure for final defect_structure
defect_properties = self.site.properties.copy()
if ('velocities' in self.bulk_structure.site_properties) and \
'velocities' not in defect_properties:
if all( vel == self.bulk_structure.site_properties['velocities'][0]
for vel in self.bulk_structure.site_properties['velocities']):
defect_properties['velocities'] = self.bulk_structure.site_properties['velocities'][0]
else:
raise ValueError("No velocity property specified for defect site and "
"bulk_structure velocities are not homogeneous. Please specify this "
"property within the initialized defect_site object.")
#create a trivial defect structure to find where supercell transformation moves the lattice
site_properties_for_fake_struct = {prop: [val] for prop,val in defect_properties.items()}
struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True,
site_properties = site_properties_for_fake_struct)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
subsite = defect_structure.pop(defindex)
defect_structure.append(self.site.specie.symbol, subsite.coords, coords_are_cartesian=True,
properties = defect_site.properties)
defect_structure.set_charge(self.charge)
return defect_structure | 0.010717 |
def getvar(root, name, vtype='', dimensions=(), digits=0, fill_value=None,
source=None):
"""
Return a variable from a NCFile or NCPackage instance. If the variable
doesn't exists create it.
Keyword arguments:
root -- the root descriptor returned by the 'open' function
name -- the name of the variable
vtype -- the type of each value, ex ['f4', 'i4', 'i1', 'S1'] (default '')
dimensions -- the tuple with dimensions name of the variables (default ())
digits -- the precision required when using a 'f4' vtype (default 0)
fill_value -- the initial value used in the creation time (default None)
source -- the source variable to be copied (default None)
"""
return root.getvar(name, vtype, dimensions, digits, fill_value, source) | 0.001266 |
def list_names(cls):
"""Retrieve paas id and names."""
ret = dict([(item['id'], item['name'])
for item in cls.list({'items_per_page': 500})])
return ret | 0.010204 |
def list_remote(local_root):
"""Get remote branch/tag latest SHAs.
:raise GitError: When git ls-remote fails.
:param str local_root: Local path to git root directory.
:return: List of tuples containing strings. Each tuple is sha, name, kind.
:rtype: list
"""
command = ['git', 'ls-remote', '--heads', '--tags']
try:
output = run_command(local_root, command)
except CalledProcessError as exc:
raise GitError('Git failed to list remote refs.', exc.output)
# Dereference annotated tags if any. No need to fetch annotations.
if '^{}' in output:
parsed = list()
for group in (m.groupdict() for m in RE_REMOTE.finditer(output)):
dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind']
if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']:
parsed[-1]['sha'] = group['sha']
else:
parsed.append(group)
else:
parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)]
return [[i['sha'], i['name'], i['kind']] for i in parsed] | 0.002577 |
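The dereferencing branch exists because an annotated tag appears twice in git ls-remote output — once as the tag object and once with a ^{} suffix pointing at the underlying commit — and the second SHA should win. Illustrative input and expected result (hypothetical SHAs; RE_REMOTE is assumed to capture sha/name/kind groups as the code implies):

ls_remote_output = (
    "1111111111111111111111111111111111111111\trefs/heads/master\n"
    "2222222222222222222222222222222222222222\trefs/tags/v1.0\n"
    "3333333333333333333333333333333333333333\trefs/tags/v1.0^{}\n"
)
# Expected parse: the ^{} line overwrites the tag-object SHA, so the annotated
# tag reports the commit it points at.
expected = [
    ['1111111111111111111111111111111111111111', 'master', 'heads'],
    ['3333333333333333333333333333333333333333', 'v1.0', 'tags'],
]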
def guess_codec(file, errors="strict", require_char=False):
"""Look at file contents and guess its correct encoding.
File must be open in binary mode and positioned at offset 0. If BOM
record is present then it is assumed to be UTF-8 or UTF-16 encoded
file. GEDCOM header is searched for CHAR record and encoding name
is extracted from it, if BOM record is present then CHAR record
must match BOM-defined encoding.
:param file: File object, must be open in binary mode.
:param str errors: Controls error handling behavior during string
decoding, accepts same values as standard `codecs.decode` method.
:param bool require_char: If True then exception is thrown if CHAR
record is not found in a header, if False and CHAR is not in the
header then codec determined from BOM or "gedcom" is returned.
:returns: Tuple (codec_name, bom_size)
:raises: :py:class:`CodecError` when codec name in file is unknown or
when codec name in file contradicts codec determined from BOM.
:raises: :py:class:`UnicodeDecodeError` when codec fails to decode
input lines and `errors` is set to "strict" (default).
"""
# mapping of gedcom character set specifiers to Python encoding names
gedcom_char_to_codec = {
'ansel': 'gedcom',
}
# check BOM first
bom_codec = check_bom(file)
bom_size = file.tell()
codec = bom_codec or 'gedcom'
# scan header until CHAR or end of header
while True:
# this stops at '\n'
line = file.readline()
if not line:
raise IOError("Unexpected EOF while reading GEDCOM header")
# do not decode bytes to strings here, reason is that some
# stupid apps split CONC record at byte level (in middle of
# of multi-byte characters). This implies that we can only
# work with encodings that have ASCII as single-byte subset.
line = line.lstrip().rstrip(b"\r\n")
words = line.split()
if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD":
# past header but have not seen CHAR
if require_char:
raise CodecError("GEDCOM header does not have CHAR record")
else:
break
elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR":
try:
encoding = words[2].decode(codec, errors)
encoding = gedcom_char_to_codec.get(encoding.lower(),
encoding.lower())
new_codec = codecs.lookup(encoding).name
except LookupError:
raise CodecError("Unknown codec name {0}".format(encoding))
if bom_codec is None:
codec = new_codec
elif new_codec != bom_codec:
raise CodecError("CHAR codec {0} is different from BOM "
"codec {1}".format(new_codec, bom_codec))
break
return codec, bom_size | 0.00033 |
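Usage sketch against an in-memory GEDCOM header; io.BytesIO behaves like a binary file positioned at offset 0, and check_bom plus the custom "gedcom" (ANSEL) codec are assumed to be provided elsewhere in this module:

import io

header = (b"0 HEAD\n"
          b"1 SOUR TEST\n"
          b"1 CHAR UTF-8\n"
          b"0 @I1@ INDI\n")
codec, bom_size = guess_codec(io.BytesIO(header))
# No BOM present, so the codec comes from the CHAR record: ("utf-8", 0)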
def pull(self, repository, tag=None, stream=False, auth_config=None,
decode=False, platform=None):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
stream (bool): Stream the output as a generator. Make sure to
consume the generator, otherwise pull might get cancelled.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
Returns:
(generator or str): The output
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.pull('busybox', stream=True, decode=True):
... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
"id": "e72ac664f4f0"
}
{
"status": "Pulling image (latest) from busybox, endpoint: ...",
"progressDetail": {},
"id": "e72ac664f4f0"
}
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
if platform is not None:
if utils.version_lt(self._version, '1.32'):
raise errors.InvalidVersion(
'platform was only introduced in API version 1.32'
)
params['platform'] = platform
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response) | 0.001105 |