| text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
| --- | --- |
def gapfill(model, universal=None, lower_bound=0.05,
penalties=None, demand_reactions=True, exchange_reactions=False,
iterations=1):
"""Perform gapfilling on a model.
See documentation for the class GapFiller.
Parameters
----------
model : cobra.Model
The model to perform gap filling on.
universal : cobra.Model, None
A universal model with reactions that can be used to complete the
model. If left missing, only demand and exchange reactions are
considered for gapfilling.
lower_bound : float
The minimally accepted flux for the objective in the filled model.
penalties : dict, None
A dictionary with keys being 'universal' (all reactions included in
the universal model), 'exchange' and 'demand' (all additionally
added exchange and demand reactions) for the three reaction types.
Can also have reaction identifiers for reaction specific costs.
Defaults are 1, 100 and 1 respectively.
iterations : int
The number of rounds of gapfilling to perform. For every iteration,
the penalty for every used reaction increases linearly. This way,
the algorithm is encouraged to search for alternative solutions
which may include previously used reactions. I.e., with enough
iterations pathways including 10 steps will eventually be reported
even if the shortest pathway is a single reaction.
exchange_reactions : bool
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool
Consider adding demand reactions for all metabolites.
Returns
-------
iterable
list of lists with one set of reactions that completes the model per
requested iteration.
Examples
--------
>>> import cobra.test as ct
>>> from cobra import Model
>>> from cobra.flux_analysis import gapfill
>>> model = ct.create_test_model("salmonella")
>>> universal = Model('universal')
>>> universal.add_reactions(model.reactions.GF6PTA.copy())
>>> model.remove_reactions([model.reactions.GF6PTA])
>>> gapfill(model, universal)
"""
gapfiller = GapFiller(model, universal=universal,
lower_bound=lower_bound, penalties=penalties,
demand_reactions=demand_reactions,
exchange_reactions=exchange_reactions)
return gapfiller.fill(iterations=iterations) | 0.000398 |
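# Hedged usage sketch for gapfill(), reusing the docstring's own example and
# adding the documented penalties/iterations options; it assumes cobra and its
# bundled test models are installed, and the penalty values are illustrative.
import cobra.test as ct
from cobra import Model
from cobra.flux_analysis import gapfill

model = ct.create_test_model("salmonella")
universal = Model("universal")
universal.add_reactions([model.reactions.GF6PTA.copy()])
model.remove_reactions([model.reactions.GF6PTA])
solutions = gapfill(model, universal,
                    penalties={"universal": 1, "demand": 1, "exchange": 100},
                    iterations=2)
for i, reactions in enumerate(solutions, start=1):
    print("iteration", i, [r.id for r in reactions])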
def do_build(self):
"""
Create the default version of the layer
:return: a string of the packet with the payload
"""
if not self.explicit:
self = next(iter(self))
pkt = self.self_build()
for t in self.post_transforms:
pkt = t(pkt)
pay = self.do_build_payload()
if self.raw_packet_cache is None:
return self.post_build(pkt, pay)
else:
return pkt + pay | 0.004175 |
def lv_load_areas(self):
"""Returns a generator for iterating over load_areas
Yields
------
load_area
The next load area, yielded in an order sorted by ``repr``.
"""
for load_area in sorted(self._lv_load_areas, key=lambda _: repr(_)):
yield load_area | 0.009836 |
def array_to_base64_png(array):
"""Convert an array into base64-enoded PNG image.
Args:
array: A 2D np.ndarray or nested list of items.
Returns:
A base64-encoded string of the image. The image is grayscale if the array is
2D. The image is RGB color if the array is 3D with last dimension equal to
3.
Raises:
ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is
empty.
"""
# TODO(cais): Deal with 3D case.
# TODO(cais): If there are None values in here, replace them with all NaNs.
array = np.array(array, dtype=np.float32)
if len(array.shape) != 2:
raise ValueError(
"Expected rank-2 array; received rank-%d array." % len(array.shape))
if not np.size(array):
raise ValueError(
"Cannot encode an empty array (size: %s) as image." % (array.shape,))
is_infinity = np.isinf(array)
is_positive = array > 0.0
is_positive_infinity = np.logical_and(is_infinity, is_positive)
is_negative_infinity = np.logical_and(is_infinity,
np.logical_not(is_positive))
is_nan = np.isnan(array)
finite_indices = np.where(np.logical_and(np.logical_not(is_infinity),
np.logical_not(is_nan)))
if np.size(finite_indices):
# Finite subset is not empty.
minval = np.min(array[finite_indices])
maxval = np.max(array[finite_indices])
scaled = np.array((array - minval) / (maxval - minval) * 255,
dtype=np.uint8)
rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1)
else:
rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8)
# Color-code pixels that correspond to infinities and nans.
rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB
rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB
rgb[is_nan] = NAN_RGB
image_encoded = base64.b64encode(encoder.encode_png(rgb))
return image_encoded | 0.010797 |
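# Illustrative sketch (not part of the original module): assuming the function
# above is importable together with its module-level helpers (np, base64,
# encoder and the *_RGB constants), decode the returned base64 string back
# into PNG bytes on disk; "heatmap.png" is an arbitrary file name.
import base64

encoded = array_to_base64_png([[0.0, 0.5], [1.0, float("inf")]])
with open("heatmap.png", "wb") as f:
    f.write(base64.b64decode(encoded))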
def _add_post_data(self, request: Request):
'''Add data to the payload.'''
if self._item_session.url_record.post_data:
data = wpull.string.to_bytes(self._item_session.url_record.post_data)
else:
data = wpull.string.to_bytes(
self._processor.fetch_params.post_data
)
request.method = 'POST'
request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
request.fields['Content-Length'] = str(len(data))
_logger.debug('Posting with data {0}.', data)
if not request.body:
request.body = Body(io.BytesIO())
with wpull.util.reset_file_offset(request.body):
request.body.write(data) | 0.004082 |
def _parse_message(self, data):
"""
Parses the raw message from the device.
:param data: message data
:type data: string
:raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
"""
try:
_, values = data.split(':')
self.serial_number, self.value = values.split(',')
self.value = int(self.value, 16)
is_bit_set = lambda b: self.value & (1 << (b - 1)) > 0
# Bit 1 = unknown
self.battery = is_bit_set(2)
self.supervision = is_bit_set(3)
# Bit 4 = unknown
self.loop[2] = is_bit_set(5)
self.loop[1] = is_bit_set(6)
self.loop[3] = is_bit_set(7)
self.loop[0] = is_bit_set(8)
except ValueError:
raise InvalidMessageError('Received invalid message: {0}'.format(data)) | 0.004515 |
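# Standalone sketch of the bit layout decoded above (not part of the original
# class): bit 2 is battery, bit 3 is supervision, and bits 5-8 map to loop
# indices 2, 1, 3 and 0 respectively. The value 0xA2 is made up purely to
# exercise the mask.
value = 0xA2                                      # 1010 0010 in binary
is_bit_set = lambda b: value & (1 << (b - 1)) > 0
print(is_bit_set(2))   # True  -> battery
print(is_bit_set(3))   # False -> supervision
print(is_bit_set(6))   # True  -> loop[1]
print(is_bit_set(8))   # True  -> loop[0]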
def set_string(self, string_options):
"""Set a series of properties using a string.
For example::
'fred=12, tile'
'[fred=12]'
"""
vo = ffi.cast('VipsObject *', self.pointer)
cstr = _to_bytes(string_options)
result = vips_lib.vips_object_set_from_string(vo, cstr)
return result == 0 | 0.005464 |
def reload_manifest(self, manifest):
"""
Reloads a manifest from the disk
:param manifest: The manifest to reload
"""
self._logger.debug("Reloading manifest for {}.".format(manifest.get("name", "Unnamed Plugin")))
self._manifests.remove(manifest)
self.load_manifest(manifest["path"])
self._logger.debug("Manifest reloaded.") | 0.007732 |
def z_angle_rotate(xy, theta):
"""
Rotate the input vector or set of vectors `xy` by the angle `theta`
about the z axis.
Parameters
----------
xy : array_like
The vector or array of vectors to transform. Must have shape
(2,) or (2, N).
theta : numeric or array_like
The rotation angle(s) in radians.
"""
xy = np.array(xy).T
theta = np.array(theta).T
out = np.zeros_like(xy)
out[...,0] = np.cos(theta)*xy[...,0] - np.sin(theta)*xy[...,1]
out[...,1] = np.sin(theta)*xy[...,0] + np.cos(theta)*xy[...,1]
return out.T | 0.014675 |
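# Quick check for z_angle_rotate, assuming the function above and numpy
# (as np) are in scope: rotating the unit x vector by pi/2 gives the unit
# y vector, up to floating-point noise.
import numpy as np

print(z_angle_rotate([1.0, 0.0], np.pi / 2))   # approximately [0., 1.]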
def _HandleLegacy(self, args, token=None):
"""Retrieves the stats for a hunt."""
hunt_obj = aff4.FACTORY.Open(
args.hunt_id.ToURN(), aff4_type=implementation.GRRHunt, token=token)
stats = hunt_obj.GetRunner().context.usage_stats
return ApiGetHuntStatsResult(stats=stats) | 0.003378 |
def figures(df,specs,asList=False):
"""
Generates multiple Plotly figures for a given DataFrame
Parameters:
-----------
df : DataFrame
Pandas DataFrame
specs : list(dict)
List of dictionaries with the properties
of each figure.
All available properties can be seen with
help(cufflinks.pd.DataFrame.iplot)
asList : boolean
If True, then a list of figures is returned.
Otherwise a single (merged) figure is returned.
Default : False
"""
figs=[]
for spec in specs:
figs.append(df.figure(**spec))
if asList:
return figs
else:
return merge_figures(figs) | 0.045531 |
def plot_fluxseries(
self, names: Optional[Iterable[str]] = None,
average: bool = False, **kwargs: Any) \
-> None:
"""Plot the `flux` series of the handled model.
See the documentation on method |Element.plot_inputseries| for
additional information.
"""
self.__plot(self.model.sequences.fluxes, names, average, kwargs) | 0.005063 |
async def send_contact(self, phone_number: base.String,
first_name: base.String, last_name: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
"""
Use this method to send phone contacts.
Source: https://core.telegram.org/bots/api#sendcontact
:param phone_number: Contact's phone number
:type phone_number: :obj:`base.String`
:param first_name: Contact's first name
:type first_name: :obj:`base.String`
:param last_name: Contact's last name
:type last_name: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
warn_deprecated('"Message.send_contact" method will be removed in 2.2 version.\n'
'Use "Message.reply_contact" instead.',
stacklevel=8)
return await self.bot.send_contact(chat_id=self.chat.id,
phone_number=phone_number,
first_name=first_name, last_name=last_name,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup) | 0.006951 |
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes) | 0.003842 |
def warp_object(self, tileMapObj):
"""Warp the tile map object from one warp to another."""
print "Collision"
if tileMapObj.can_warp:
#Check to see if we need to load a different tile map
if self.map_association != self.exitWarp.map_association:
#Load the new tile map.
TileMapManager.load(self.exitWarp.map_association)
tileMapObj.parent.coords = self.exitWarp.coords | 0.008772 |
def _init_filters(self):
"""Initialize the default pywb provided Jninja filters available during template rendering"""
self.filters = {}
@self.template_filter()
def format_ts(value, format_='%a, %b %d %Y %H:%M:%S'):
"""Formats the supplied timestamp using format_
:param str value: The timestamp to be formatted
:param str format_: The format string
:return: The correctly formatted timestamp as determined by format_
:rtype: str
"""
if format_ == '%s':
return timestamp_to_sec(value)
else:
value = timestamp_to_datetime(value)
return value.strftime(format_)
@self.template_filter('urlsplit')
def get_urlsplit(url):
"""Splits the supplied URL
:param str url: The url to be split
:return: The split url
:rtype: urllib.parse.SplitResult
"""
split = urlsplit(url)
return split
@self.template_filter()
def tojson(obj):
"""Converts the supplied object/array/any to a JSON string if it can be JSONified
:param any obj: The value to be converted to a JSON string
:return: The JSON string representation of the supplied value
:rtype: str
"""
return json.dumps(obj)
@self.template_filter()
def tobool(bool_val):
"""Converts a python boolean to a JS "true" or "false" string
:param any obj: A value to be evaluated as a boolean
:return: The string "true" or "false" to be inserted into JS
"""
return 'true' if bool_val else 'false' | 0.00226 |
def load_features(self, features, image_type=None, from_array=False,
threshold=0.001):
""" Load features from current Dataset instance or a list of files.
Args:
features: List containing paths to, or names of, features to
extract. Each element in the list must be a string containing
either a path to an image, or the name of a feature (as named
in the current Dataset). Mixing of paths and feature names
within the list is not allowed.
image_type: Optional suffix indicating which kind of image to use
for analysis. Only used if features are taken from the Dataset;
if features is a list of filenames, image_type is ignored.
from_array: If True, the features argument is interpreted as a
string pointing to the location of a 2D ndarray on disk
containing feature data, where rows are voxels and columns are
individual features.
threshold: If features are taken from the dataset, this is the
threshold passed to the meta-analysis module to generate fresh
images.
"""
if from_array:
if isinstance(features, list):
features = features[0]
self._load_features_from_array(features)
elif path.exists(features[0]):
self._load_features_from_images(features)
else:
self._load_features_from_dataset(
features, image_type=image_type, threshold=threshold) | 0.001854 |
def _send_locked(self, cmd):
"""Sends the specified command to the lutron controller.
Assumes self._lock is held.
"""
_LOGGER.debug("Sending: %s" % cmd)
try:
self._telnet.write(cmd.encode('ascii') + b'\r\n')
except BrokenPipeError:
self._disconnect_locked() | 0.010204 |
def suites(self, request, pk=None):
"""
List of test suite names available in this project
"""
suites_names = self.get_object().suites.values_list('slug')
suites_metadata = SuiteMetadata.objects.filter(kind='suite', suite__in=suites_names)
page = self.paginate_queryset(suites_metadata)
serializer = SuiteMetadataSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data) | 0.00823 |
def hilbert(self, num_taps=None):
'''Apply an odd-tap Hilbert transform filter, phase-shifting the signal
by 90 degrees. This is used in many matrix coding schemes and for
analytic signal generation. The process is often written as a
multiplication by i (or j), the imaginary unit. An odd-tap Hilbert
transform filter has a bandpass characteristic, attenuating the lowest
and highest frequencies.
Parameters
----------
num_taps : int or None, default=None
Number of filter taps - must be odd. If none, it is chosen to have
a cutoff frequency of about 75 Hz.
'''
if num_taps is not None and not isinstance(num_taps, int):
raise ValueError("num taps must be None or an odd integer.")
if num_taps is not None and num_taps % 2 == 0:
raise ValueError("num_taps must an odd integer.")
effect_args = ['hilbert']
if num_taps is not None:
effect_args.extend(['-n', '{}'.format(num_taps)])
self.effects.extend(effect_args)
self.effects_log.append('hilbert')
return self | 0.001721 |
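# Usage sketch, assuming this method belongs to pysox's Transformer (the
# effect-chain bookkeeping above suggests so); the file names are placeholders
# and 11 is just an arbitrary odd tap count.
import sox

tfm = sox.Transformer()
tfm.hilbert(num_taps=11)
tfm.build("input.wav", "phase_shifted.wav")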
def regionsIntersection(s1, s2, collapse=True):
"""
given two lists of genomic regions with chromosome, start and end
coordinates, return a new list of regions which is the intersection of those
two sets. Lists must be sorted by chromosome and start index
:return: new list that represents the intersection of the two input lists.
output regions will all have name "X", be on strand "+" and have
score 0
:param s1: first list of genomic regions
:param s2: second list of genomic regions
:raise GenomicIntervalError: if the input regions are not sorted correctly
(by chromosome and start index)
:note: O(n) time, O(n) space; informally, might use up to 3x space of input
"""
debug = False
# we don't need to explicitly check for sorting because sorted order is
# a post-condition of the collapsing function
s1_c = collapseRegions(s1)
s2_c = collapseRegions(s2)
if len(s1_c) == 0 or len(s2_c) == 0:
return []
res = []
j = 0
for i in range(0, len(s1_c)):
if debug:
sys.stderr.write("processing from s1_c : " + str(s1_c[i]) + "\n")
# find first thing in s2_c with end in or after s1_c[i]
if debug:
sys.stderr.write("i = " + str(i) + " and j = " + str(j) + "\n")
while (j < len(s2_c) and
(s2_c[j].chrom < s1_c[i].chrom or
(s2_c[j].chrom == s1_c[i].chrom and s2_c[j].end <= s1_c[i].start))):
j += 1
# nothing intersects if we hit the end of s2, or the end of the chrom,
# or we're still on the same chrom but start after the end of s2_c[i]
if j >= len(s2_c) or s2_c[j].chrom > s1_c[i].chrom or \
(s2_c[j].chrom == s1_c[i].chrom and s2_c[j].start >= s1_c[i].end):
continue
# now everything at or after j in s2_c that starts before
# the end of s1_c must overlap with it
while s2_c[j].start < s1_c[i].end:
s = max(s1_c[i].start, s2_c[j].start)
e = min(s1_c[i].end, s2_c[j].end)
overlap = GenomicInterval(s1_c[i].chrom, s, e, "X", 0, "+")
if debug:
sys.stderr.write("\tadding to overlaps: " + str(overlap) + "\n")
res.append(overlap)
j += 1
if j >= len(s2_c) or s2_c[j].chrom != s1_c[i].chrom:
break
# it's possible the last intersecting element runs on to the
# next element from s1_c, so...
j -= 1
if debug:
sys.stderr.write("\tmoving s2_c index back to " + str(s2_c[j]) + "\n")
return res | 0.009756 |
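# Minimal sketch of regionsIntersection, assuming the module's GenomicInterval
# constructor takes (chrom, start, end, name, score, strand) as used above;
# all coordinates are invented.
s1 = [GenomicInterval("chr1", 10, 50, "A", 0, "+"),
      GenomicInterval("chr1", 70, 120, "B", 0, "+")]
s2 = [GenomicInterval("chr1", 40, 80, "C", 0, "+")]
for r in regionsIntersection(s1, s2):
    print(r.chrom, r.start, r.end)   # chr1 40 50, then chr1 70 80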
def _init_transforms(self, subjs, voxels, features, random_state):
"""Initialize the mappings (Wi) with random orthogonal matrices.
Parameters
----------
subjs : int
The number of subjects.
voxels : list of int
A list with the number of voxels per subject.
features : int
The number of features in the model.
random_state : `RandomState`
A random state to draw the mappings.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for
each subject.
Note
----
Not thread safe.
"""
# Init the Random seed generator
np.random.seed(self.rand_seed)
# Draw a random W for each subject
W = [random_state.random_sample((voxels[i], features))
for i in range(subjs)]
# Make it orthogonal with QR decomposition
for i in range(subjs):
W[i], _ = np.linalg.qr(W[i])
return W | 0.001781 |
def _removeContentPanels(cls, remove):
"""
Remove the panels and so hide the fields named.
"""
if type(remove) is str:
remove = [remove]
cls.content_panels = [panel for panel in cls.content_panels
if getattr(panel, "field_name", None) not in remove] | 0.009063 |
def loads(s: str, load_module: types.ModuleType, **kwargs):
""" Convert a JSON string into a JSGObject
:param s: string representation of JSON document
:param load_module: module that contains declarations for types
:param kwargs: arguments see: json.load for details
:return: JSGObject representing the json string
"""
return json.loads(s, object_hook=lambda pairs: loads_loader(load_module, pairs), **kwargs) | 0.004556 |
def file_counts(container=None,
patterns=None,
image_package=None,
file_list=None):
'''file_counts will return the number of files that match one or more regular expressions.
If no patterns are defined, a default of "readme" is used. All patterns and files are made
case insensitive.
Parameters
==========
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param patterns: one or more patterns (str or list) of files to search for.
:param file_list: the list of files in the container; if not provided, it will be
generated from the container contents.
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if patterns is None:
patterns = 'readme'
if not isinstance(patterns,list):
patterns = [patterns]
count = 0
for pattern in patterns:
count += len([x for x in file_list if re.search(pattern.lower(),x.lower())])
bot.info("Total files matching patterns is %s" %count)
return count | 0.011102 |
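# Hedged example for file_counts, passing an explicit file_list so that no
# container or image_package is needed; the paths are invented.
files = ["/usr/share/doc/README.md", "/opt/app/main.py", "/opt/app/readme.txt"]
print(file_counts(file_list=files, patterns="readme"))   # 2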
def fail(self, key, **kwargs):
"""A helper method that simply raises a `ValidationError`.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
msg = MISSING_ERROR_MESSAGE.format(class_name=class_name,
key=key)
raise AssertionError(msg)
if isinstance(msg, str):
msg = msg.format(**kwargs)
raise exceptions.ValidationError(msg, self.field_name) | 0.003717 |
def flush(name, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Flush current ipset set
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
set_check = __salt__['ipset.check_set'](name)
if set_check is False:
ret['result'] = False
ret['comment'] = ('ipset set {0} does not exist for {1}'
.format(name, family))
return ret
if __opts__['test']:
ret['comment'] = 'ipset entries in set {0} for {1} would be flushed'.format(
name,
family)
return ret
if __salt__['ipset.flush'](name, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Flushed ipset entries from set {0} for {1}'.format(
name,
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to flush ipset entries from set {0} for {1}' \
''.format(name, family)
return ret | 0.001756 |
def leave(self, screen_id):
"""Informs the target about a drag and drop leave event.
in screen_id of type int
The screen ID where the drag and drop event occurred.
raises :class:`VBoxErrorVmError`
VMM device is not available.
"""
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
self._call("leave",
in_p=[screen_id]) | 0.01002 |
def count_fingerprint(word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
"""Return the count fingerprint.
This is a wrapper for :py:meth:`Count.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The count fingerprint
Examples
--------
>>> bin(count_fingerprint('hat'))
'0b1010000000001'
>>> bin(count_fingerprint('niall'))
'0b10001010000'
>>> bin(count_fingerprint('colin'))
'0b101010000'
>>> bin(count_fingerprint('atcg'))
'0b1010000000000'
>>> bin(count_fingerprint('entreatment'))
'0b1111010000100000'
"""
return Count().fingerprint(word, n_bits, most_common) | 0.001135 |
def get_queryset(self, request):
"""
Make special filtering by user's permissions.
"""
if not request.user.has_perm('zinnia.can_view_all'):
queryset = self.model.objects.filter(authors__pk=request.user.pk)
else:
queryset = super(EntryAdmin, self).get_queryset(request)
return queryset.prefetch_related('categories', 'authors', 'sites') | 0.004914 |
def add_library(self, name):
"""Add a library to the database
This method is for adding a library by name (eg: "BuiltIn")
rather than by a file.
"""
libdoc = LibraryDocumentation(name)
if len(libdoc.keywords) > 0:
# FIXME: figure out the path to the library file
collection_id = self.add_collection(None, libdoc.name, libdoc.type,
libdoc.doc, libdoc.version,
libdoc.scope, libdoc.named_args,
libdoc.doc_format)
self._load_keywords(collection_id, libdoc=libdoc) | 0.004354 |
def _zlib_no_compress(data):
"""Compress data with zlib level 0."""
cobj = zlib.compressobj(0)
return b"".join([cobj.compress(data), cobj.flush()]) | 0.006289 |
def infer_call_result(self, caller, context=None):
"""infer what a class instance is returning when called"""
context = contextmod.bind_context_to_node(context, self)
inferred = False
for node in self._proxied.igetattr("__call__", context):
if node is util.Uninferable or not node.callable():
continue
for res in node.infer_call_result(caller, context):
inferred = True
yield res
if not inferred:
raise exceptions.InferenceError(node=self, caller=caller, context=context) | 0.005042 |
def commit(self):
"""
Insert the specified text in all selected lines, always
at the same column position.
"""
# Get the number of lines and columns in last line.
last_line, last_col = self.qteWidget.getNumLinesAndColumns()
# If this is the first ever call to this undo/redo element
# then backup the current cursor- and marker position because
# both will be required for the redo operation.
if self.cursorPos is None:
# Get the default marker and ensure it points to a
# valid location inside the document.
self.markerPos = self.qteWidget.qteGetMark()
if self.markerPos is None:
return
if not self.qteWidget.isPositionValid(*self.markerPos):
return
# Backup the current cursor and marker position; swap
# one for the other if necessary to ensure the marker
# comes first.
self.cursorPos = self.qteWidget.getCursorPosition()
if self.cursorPos[0] < self.markerPos[0]:
self.cursorPos, self.markerPos = self.markerPos, self.cursorPos
# Shorthand for qteWidget and left/right position of rectangle.
wid = self.qteWidget
col1 = min((self.markerPos[1], self.cursorPos[1]))
col2 = max((self.markerPos[1], self.cursorPos[1]))
# Insert the specified string at the same position in every line
# in between the mark and the cursor (inclusive).
self.removedText = []
for line in range(self.markerPos[0], self.cursorPos[0] + 1):
text = wid.text(line)
if col1 >= len(text):
# If the line has no text in the specified column
# range then ignore it.
self.removedText.append('')
continue
if col2 > len(text):
# If the col1-col2 range spans only part of the
# line then select only that part.
wid.setSelection(line, col1, line, col1)
wid.SendScintilla(wid.SCI_LINEENDEXTEND, 0, 0)
else:
# If the col1-col2 range is a subset of the entire
# line then select the entire range.
wid.setSelection(line, col1, line, col2)
# Backup and remove the selected text.
self.removedText.append(self.qteWidget.selectedText())
self.baseClass.removeSelectedText()
self.qteWidget.setCursorPosition(self.cursorPos[0], self.markerPos[1])
# Determine the user selected string length and initialise the global
# variable qteKilledTextFromRectangle with an empty list.
strlen = col2 - col1
global qteKilledTextFromRectangle
qteKilledTextFromRectangle = []
# Copy the removed text into the global variable
# 'qteKilledTextFromRectangle' so that YankRectangle can
# access it. However, ensure that every element has exactly
# the length specified by the user defined rectangle; zero pad
# elements that are too short. Note: do not apply this zero
# padding to self.removedText because otherwise the text could
# not be undone correctly.
for el in self.removedText:
# Determine how many white space characters are required
# to make the string 'strLen' characters long.
pad = strlen - len(el)
# Sanity check.
if pad < 0:
qteKilledTextFromRectangle = None
self.qteWidget.setCursorPosition(*self.cursorPos)
self.cursorPos = self.markerPos = None
msg = 'Padding length cannot be negative --> this is a bug'
self.qteLogger.error(msg)
return
# Store the padded version of the string.
qteKilledTextFromRectangle.append(el + ' ' * pad) | 0.000505 |
def login_required(obj):
"""
Requires that the user be logged in order to gain access to the resource
at the specified the URI.
"""
decorator = request_passes_test(lambda r, *args, **kwargs: r.user.is_authenticated())
return wrap_object(obj, decorator) | 0.007246 |
def generate(self, callback=None):
"""
Computes and stores piece data. Returns ``True`` on success, ``False``
otherwise.
:param callback: progress/cancellation callable with method
signature ``(filename, pieces_completed, pieces_total)``.
Useful for reporting progress if dottorrent is used in a
GUI/threaded context, and if torrent generation needs to be cancelled.
The callable's return value should evaluate to ``True`` to trigger
cancellation.
"""
files = []
single_file = os.path.isfile(self.path)
if single_file:
files.append((self.path, os.path.getsize(self.path), {}))
elif os.path.exists(self.path):
for x in os.walk(self.path):
for fn in x[2]:
if any(fnmatch.fnmatch(fn, ext) for ext in self.exclude):
continue
fpath = os.path.normpath(os.path.join(x[0], fn))
fsize = os.path.getsize(fpath)
if fsize and not is_hidden_file(fpath):
files.append((fpath, fsize, {}))
else:
raise exceptions.InvalidInputException
total_size = sum([x[1] for x in files])
if not (len(files) and total_size):
raise exceptions.EmptyInputException
# set piece size if not already set
if self.piece_size is None:
self.piece_size = self.get_info()[2]
if files:
self._pieces = bytearray()
i = 0
num_pieces = math.ceil(total_size / self.piece_size)
pc = 0
buf = bytearray()
while i < len(files):
fe = files[i]
f = open(fe[0], 'rb')
if self.include_md5:
md5_hasher = md5()
else:
md5_hasher = None
for chunk in iter(lambda: f.read(self.piece_size), b''):
buf += chunk
if len(buf) >= self.piece_size \
or i == len(files)-1:
piece = buf[:self.piece_size]
self._pieces += sha1(piece).digest()
del buf[:self.piece_size]
pc += 1
if callback:
cancel = callback(fe[0], pc, num_pieces)
if cancel:
f.close()
return False
if self.include_md5:
md5_hasher.update(chunk)
if self.include_md5:
fe[2]['md5sum'] = md5_hasher.hexdigest()
f.close()
i += 1
# Add pieces from any remaining data
while len(buf):
piece = buf[:self.piece_size]
self._pieces += sha1(piece).digest()
del buf[:self.piece_size]
pc += 1
if callback:
cancel = callback(fe[0], pc, num_pieces)
if cancel:
return False
# Create the torrent data structure
data = OrderedDict()
if len(self.trackers) > 0:
data['announce'] = self.trackers[0].encode()
if len(self.trackers) > 1:
data['announce-list'] = [[x.encode()] for x in self.trackers]
if self.comment:
data['comment'] = self.comment.encode()
if self.created_by:
data['created by'] = self.created_by.encode()
else:
data['created by'] = DEFAULT_CREATOR.encode()
if self.creation_date:
data['creation date'] = int(self.creation_date.timestamp())
if self.web_seeds:
data['url-list'] = [x.encode() for x in self.web_seeds]
data['info'] = OrderedDict()
if single_file:
data['info']['length'] = files[0][1]
if self.include_md5:
data['info']['md5sum'] = files[0][2]['md5sum']
data['info']['name'] = files[0][0].split(os.sep)[-1].encode()
else:
data['info']['files'] = []
path_sp = self.path.split(os.sep)
for x in files:
fx = OrderedDict()
fx['length'] = x[1]
if self.include_md5:
fx['md5sum'] = x[2]['md5sum']
fx['path'] = [y.encode()
for y in x[0].split(os.sep)[len(path_sp):]]
data['info']['files'].append(fx)
data['info']['name'] = path_sp[-1].encode()
data['info']['pieces'] = bytes(self._pieces)
data['info']['piece length'] = self.piece_size
data['info']['private'] = int(self.private)
if self.source:
data['info']['source'] = self.source.encode()
self._data = data
return True | 0.0006 |
def hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
extcommunity = ET.SubElement(match, "extcommunity")
extcommunity_num = ET.SubElement(extcommunity, "extcommunity-num")
extcommunity_num.text = kwargs.pop('extcommunity_num')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003697 |
def get_instance_from_id(unique_id):
"""Get an instance of the `PolygonFilter` using a unique id"""
for instance in PolygonFilter.instances:
if instance.unique_id == unique_id:
return instance
# if this does not work:
raise KeyError("PolygonFilter with unique_id {} not found.".
format(unique_id)) | 0.005263 |
def trace_dispatch(self, frame, event, arg):
"""allow to switch to Pdb instance"""
if hasattr(self, 'pdb'):
return self.pdb.trace_dispatch(frame, event, arg)
else:
return Pdb.trace_dispatch(self, frame, event, arg) | 0.007634 |
def find_external_metabolites(model):
"""Return all metabolites in the external compartment."""
ex_comp = find_external_compartment(model)
return [met for met in model.metabolites if met.compartment == ex_comp] | 0.004505 |
def get_cif(code, mmol_number, outfile=None):
"""
Parameters
----------
code : str
PDB code.
mmol_number : int
mmol number (biological assembly number) of file to download. Numbers from PDBe.
If None, defaults to the preferred biological assembly listed for code on the PDBe.
outfile : str
Filepath. Writes returned value to this file.
Returns
-------
cif_string : str, or None
Content of the cif file as a string.
None if unable to download the cif_file from the pdbe site.
"""
pdbe_url = "http://www.ebi.ac.uk/pdbe/static/entry/download/{0}-assembly-{1}.cif.gz".format(code, mmol_number)
r = requests.get(pdbe_url)
if r.status_code == 200:
temp_gz = tempfile.NamedTemporaryFile()
temp_gz.write(r.content)
with gzip.open(temp_gz.name, 'rb') as foo:
cif_string = foo.read().decode()
else:
print("Could not download cif file for {0}".format(code))
return None
# Write to file.
if outfile and cif_string:
with open(outfile, 'w') as foo:
foo.write(cif_string)
return cif_string | 0.003439 |
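# Usage sketch for get_cif; "1ubq" is a real PDB code used only as an example,
# and the call needs network access to the PDBe.
cif = get_cif("1ubq", 1, outfile="1ubq_assembly1.cif")
if cif is not None:
    print(cif.splitlines()[0])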
def post(self):
'''This handles POST requests.
Saves the changes made by the user on the frontend back to the current
checkplot-list.json file.
'''
# if self.readonly is set, then don't accept any changes
# return immediately with a 400
if self.readonly:
msg = "checkplotserver is in readonly mode. no updates allowed."
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
objectid = self.get_argument('objectid', None)
changes = self.get_argument('changes',None)
# if either of the above is invalid, return nothing
if not objectid or not changes:
msg = ("could not parse changes to the checkplot filelist "
"from the frontend")
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# otherwise, update the checkplot list JSON
objectid = xhtml_escape(objectid)
changes = json.loads(changes)
# update the dictionary
if 'reviewed' not in self.currentproject:
self.currentproject['reviewed'] = {}
self.currentproject['reviewed'][objectid] = changes
# update the JSON file
with open(self.cplistfile,'w') as outfd:
json.dump(self.currentproject, outfd)
# return status
msg = ("wrote all changes to the checkplot filelist "
"from the frontend for object: %s" % objectid)
LOGGER.info(msg)
resultdict = {'status':'success',
'message':msg,
'readonly':self.readonly,
'result':{'objectid':objectid,
'changes':changes}}
self.write(resultdict)
self.finish() | 0.009225 |
def _unpickle_collection(self, collection):
"""Unpickles all members of the specified dictionary."""
for mkey in collection:
if isinstance(collection[mkey], list):
for item in collection[mkey]:
item.unpickle(self)
else:
collection[mkey].unpickle(self) | 0.005831 |
def percentOverlap(x1, x2):
"""
Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@return percentOverlap (float) percentage overlap between x1 and x2
"""
nonZeroX1 = np.count_nonzero(x1)
nonZeroX2 = np.count_nonzero(x2)
percentOverlap = 0
minX1X2 = min(nonZeroX1, nonZeroX2)
if minX1X2 > 0:
overlap = float(np.dot(x1.T, x2))
percentOverlap = overlap / minX1X2
return percentOverlap | 0.014706 |
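# Small numeric check for percentOverlap, assuming numpy is imported as np in
# the surrounding module.
x1 = np.array([1, 0, 1, 1])
x2 = np.array([1, 1, 0, 1])
print(percentOverlap(x1, x2))   # 2 shared active bits / min(3, 3) = 0.666...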
def _read_response(self, response):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
"""
rclass = response['rclass']
if rclass != "virtual":
raise ArtifactoryException("Repositiry '{}' have '{}', but expect 'virtual'".format(self.name, rclass))
self.name = response["key"]
self.description = response["description"]
self.packageType = response["packageType"]
self._repositories = response["repositories"] | 0.007421 |
def _add_io_handler(self, handler):
"""Add an I/O handler to the loop."""
logger.debug('adding io handler: %r', handler)
self._unprepared_handlers[handler] = None
self._configure_io_handler(handler) | 0.008696 |
def run(self,
force=False,
ipyclient=None,
name_fields=30,
name_separator="_",
dry_run=False):
"""
Download the accessions into a the designated workdir.
Parameters
----------
force: (bool)
If force=True then existing files with the same name
will be overwritten.
ipyclient: (ipyparallel.Client)
If provided, work will be distributed across a parallel
client, otherwise download will be run on a single core.
name_fields: (int, str):
Provide the index of the name fields to be used as a prefix
for fastq output files. The default is 30, which is the
SampleName field. Use sra.fetch_fields to see all available
fields and their indices. A likely alternative is 1 (Run).
If multiple are listed then they will be joined by a "_"
character. For example (29,30) would yield something like:
latin-name_sample-name (e.g., mus_musculus-NR10123).
dry_run: (bool)
If True then a table of file names that _would_ be downloaded
will be shown, but the actual files will not be downloaded.
"""
## temporarily set directory for tmpfiles used by fastq-dump
## if this fails then just skip it.
try:
## ensure output directory, also used as tmpdir
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
## get original directory for sra files
## probably /home/ncbi/public/sra by default.
self._set_vdbconfig_path()
## register ipyclient for cleanup
if ipyclient:
self._ipcluster["pids"] = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
pid = engine.apply(os.getpid).get()
self._ipcluster["pids"][eid] = pid
## submit jobs to engines or local
self._submit_jobs(
force=force,
ipyclient=ipyclient,
name_fields=name_fields,
name_separator=name_separator,
dry_run=dry_run,
)
except IPyradWarningExit as inst:
print(inst)
## exceptions to catch, cleanup and handle ipyclient interrupts
except KeyboardInterrupt:
print("keyboard interrupt...")
except Exception as inst:
print("Exception in run() - {}".format(inst))
finally:
## reset working sra path
self._restore_vdbconfig_path()
## if it made a new sra directory then it should be empty when
## we are finished if all .sra files were removed. If so, then
## let's also remove the dir. if not empty, leave it.
sradir = os.path.join(self.workdir, "sra")
if os.path.exists(sradir) and (not os.listdir(sradir)):
shutil.rmtree(sradir)
else:
## print warning
try:
print(FAILED_DOWNLOAD.format(os.listdir(sradir)))
except OSError as inst:
## If sra dir doesn't even exist something very bad is broken.
raise IPyradWarningExit("Download failed. Exiting.")
## remove fastq file matching to cached sra file
for srr in os.listdir(sradir):
isrr = srr.split(".")[0]
ipath = os.path.join(self.workdir, "*_{}*.gz".format(isrr))
ifile = glob.glob(ipath)[0]
if os.path.exists(ifile):
os.remove(ifile)
## remove cache of sra files
shutil.rmtree(sradir)
## cleanup ipcluster shutdown
if ipyclient:
## send SIGINT (2) to all engines still running tasks
try:
ipyclient.abort()
time.sleep(0.5)
for engine_id, pid in self._ipcluster["pids"].items():
if ipyclient.queue_status()[engine_id]["tasks"]:
os.kill(pid, 2)
time.sleep(0.1)
except ipp.NoEnginesRegistered:
pass
## clean memory space
if not ipyclient.outstanding:
ipyclient.purge_everything()
## uh oh, kill everything, something bad happened
else:
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted") | 0.008396 |
def all_active(cls):
""" List active queues, based on their lengths in Redis. Warning, uses the unscalable KEYS redis command """
prefix = context.get_current_config()["redis_prefix"]
queues = []
for key in context.connections.redis.keys():
if key.startswith(prefix):
queues.append(Queue(key[len(prefix) + 3:]))
return queues | 0.007595 |
def create_option_value(cls, option_value, **kwargs):
"""Create OptionValue
Create a new OptionValue
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_option_value(option_value, async=True)
>>> result = thread.get()
:param async bool
:param OptionValue option_value: Attributes of optionValue to create (required)
:return: OptionValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_option_value_with_http_info(option_value, **kwargs)
else:
(data) = cls._create_option_value_with_http_info(option_value, **kwargs)
return data | 0.005556 |
def head(records, head):
"""
Limit results to the top N records.
With the leading `-', print all but the last N records.
"""
logging.info('Applying _head generator: '
'limiting results to top ' + head + ' records.')
if head == '-0':
for record in records:
yield record
elif '-' in head:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
end_index = max(record_count + int(head), 0)
for record in itertools.islice(r(), end_index):
yield record
else:
for record in itertools.islice(records, int(head)):
yield record | 0.001453 |
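# Illustrative call of the head generator above; plain strings stand in for
# real records here.
records = iter(["r1", "r2", "r3", "r4", "r5"])
print(list(head(records, "2")))   # ['r1', 'r2']
# head(records, "-2") would instead yield everything except the last two records.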
def create_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name,
description, protocol='Tcp', source_range='*', destination_range='*',
source_prefix='*', destination_prefix='*', access='Allow', priority=100,
direction='Inbound'):
'''Create network security group rule.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nsg_name (str): Name of the Network Security Group.
nsg_rule_name (str): Name of the new rule.
description (str): Description.
protocol (str): Optional protocol. Default Tcp.
source_range (str): Optional source IP range. Default '*'.
destination_range (str): Destination IP range. Default '*'.
source_prefix (str): Source DNS prefix. Default '*'.
destination_prefix (str): Destination prefix. Default '*'.
access (str): Allow or deny rule. Default Allow.
priority: Relative priority. Default 100.
direction: Inbound or Outbound. Default Inbound.
Returns:
HTTP response. NSG JSON rule body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,
'/securityRules/', nsg_rule_name,
'?api-version=', NETWORK_API])
properties = {'description': description}
properties['protocol'] = protocol
properties['sourcePortRange'] = source_range
properties['destinationPortRange'] = destination_range
properties['sourceAddressPrefix'] = source_prefix
properties['destinationAddressPrefix'] = destination_prefix
properties['access'] = access
properties['priority'] = priority
properties['direction'] = direction
ip_body = {'properties': properties}
body = json.dumps(ip_body)
return do_put(endpoint, body, access_token) | 0.002326 |
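# Hedged call sketch for create_nsg_rule; every value below (token, ids, names)
# is a placeholder, and the rule simply opens TCP port 22 inbound. Whether the
# returned object is a requests-style response is an assumption taken from the
# docstring's "HTTP response" wording.
response = create_nsg_rule(
    access_token="<token>", subscription_id="<subscription-id>",
    resource_group="my-rg", nsg_name="my-nsg", nsg_rule_name="allow-ssh",
    description="Allow inbound SSH", destination_range="22", priority=110)
print(response.status_code)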
def set_tags(name, tags, region=None, key=None, keyid=None, profile=None):
'''
Add the tags on an ELB
.. versionadded:: 2016.3.0
name
name of the ELB
tags
dict of name/value pair tags
CLI Example:
.. code-block:: bash
salt myminion boto_elb.set_tags my-elb-name "{'Tag1': 'Value', 'Tag2': 'Another Value'}"
'''
if exists(name, region, key, keyid, profile):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = _add_tags(conn, name, tags)
return ret
else:
return False | 0.003367 |
def add_scanner_param(self, name, scanner_param):
""" Add a scanner parameter. """
assert name
assert scanner_param
self.scanner_params[name] = scanner_param
command = self.commands.get('start_scan')
command['elements'] = {
'scanner_params':
{k: v['name'] for k, v in self.scanner_params.items()}} | 0.005348 |
def learn(self, bottomUpInput, enableInference=None):
"""
TODO: document
:param bottomUpInput:
:param enableInference:
:return:
"""
return self.compute(bottomUpInput, enableLearn=True,
enableInference=enableInference) | 0.014652 |
def packet_read(self):
"""Read packet from network."""
bytes_received = 0
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
if self.in_packet.command == 0:
ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)
if errnum == 0 and len(ba_data) == 1:
bytes_received += 1
byte = ba_data[0]
self.in_packet.command = byte
if self.as_broker:
if self.bridge is None and self.state == NC.CS_NEW and (byte & 0xF0) != NC.CMD_CONNECT:
print "RETURN ERR_PROTOCOL"
return NC.ERR_PROTOCOL, bytes_received
else:
if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:
return NC.ERR_SUCCESS, bytes_received
elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:
return NC.ERR_CONN_LOST, bytes_received
else:
evt = event.EventNeterr(errnum, errmsg)
self.push_event(evt)
return NC.ERR_UNKNOWN, bytes_received
if not self.in_packet.have_remaining:
loop_flag = True
while loop_flag:
ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)
if errnum == 0 and len(ba_data) == 1:
byte = ba_data[0]
bytes_received += 1
self.in_packet.remaining_count += 1
if self.in_packet.remaining_count > 4:
return NC.ERR_PROTOCOL, bytes_received
self.in_packet.remaining_length += (byte & 127) * self.in_packet.remaining_mult
self.in_packet.remaining_mult *= 128
else:
if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:
return NC.ERR_SUCCESS, bytes_received
elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:
return NC.ERR_CONN_LOST, bytes_received
else:
evt = event.EventNeterr(errnum, errmsg)
self.push_event(evt)
return NC.ERR_UNKNOWN, bytes_received
if (byte & 128) == 0:
loop_flag = False
if self.in_packet.remaining_length > 0:
self.in_packet.payload = bytearray(self.in_packet.remaining_length)
if self.in_packet.payload is None:
return NC.ERR_NO_MEM, bytes_received
self.in_packet.to_process = self.in_packet.remaining_length
self.in_packet.have_remaining = True
if self.in_packet.to_process > 0:
ba_data, errnum, errmsg = nyamuk_net.read(self.sock, self.in_packet.to_process)
if errnum == 0 and len(ba_data) > 0:
readlen = len(ba_data)
bytes_received += readlen
for idx in range(0, readlen):
self.in_packet.payload[self.in_packet.pos] = ba_data[idx]
self.in_packet.pos += 1
self.in_packet.to_process -= 1
else:
if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:
return NC.ERR_SUCCESS, bytes_received
elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:
return NC.ERR_CONN_LOST, bytes_received
else:
evt = event.EventNeterr(errnum, errmsg)
self.push_event(evt)
return NC.ERR_UNKNOWN, bytes_received
#all data for this packet is read
self.in_packet.pos = 0
ret = self.packet_handle()
self.in_packet.packet_cleanup()
self.last_msg_in = time.time()
return ret, bytes_received | 0.006279 |
def _game_image_from_screen(self, game_type):
"""Return the image of the given game type from the screen.
Return None if no game is found.
"""
# screen
screen_img = self._screen_shot()
# game image
game_rect = self._game_finders[game_type].locate_in(screen_img)
if game_rect is None:
return
t, l, b, r = game_rect
game_img = screen_img[t:b, l:r]
return game_img | 0.004338 |
def parse_release_id(release_id):
"""
Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict
"""
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result | 0.001739 |
def packages(
state, host,
packages=None, present=True, latest=False,
update=False, cache_time=None, upgrade=False,
force=False, no_recommends=False,
allow_downgrades=False,
):
'''
Install/remove/update packages & update apt.
+ packages: list of packages to ensure
+ present: whether the packages should be installed
+ latest: whether to upgrade packages without a specified version
+ update: run apt update
+ cache_time: when used with update, cache for this many seconds
+ upgrade: run apt upgrade
+ force: whether to force package installs by passing `--force-yes` to apt
+ no_recommends: don't install recommended packages
+ allow_downgrades: allow downgrading packages with version (--allow-downgrades)
Versions:
Package versions can be pinned like apt: ``<pkg>=<version>``
Cache time:
When ``cache_time`` is set the ``/var/lib/apt/periodic/update-success-stamp`` file
is touched upon successful update. Some distros already do this (Ubuntu), but others
simply leave the periodic directory empty (Debian).
'''
if update:
yield _update(state, host, cache_time=cache_time)
if upgrade:
yield _upgrade(state, host)
install_command = 'install'
if no_recommends is True:
install_command += ' --no-install-recommends'
if allow_downgrades:
install_command += ' --allow-downgrades'
# Compare/ensure packages are present/not
yield ensure_packages(
packages, host.fact.deb_packages, present,
install_command=noninteractive_apt(install_command, force=force),
uninstall_command=noninteractive_apt('remove', force=force),
upgrade_command=noninteractive_apt(install_command, force=force),
version_join='=',
latest=latest,
) | 0.002174 |
def transformer_parsing_base():
"""HParams for parsing on WSJ only."""
hparams = transformer_base()
hparams.attention_dropout = 0.2
hparams.layer_prepostprocess_dropout = 0.2
hparams.max_length = 512
hparams.learning_rate_warmup_steps = 16000
hparams.hidden_size = 1024
hparams.learning_rate = 0.05
hparams.shared_embedding_and_softmax_weights = False
return hparams | 0.028497 |
def populate_unique_identifiers(self, metamodel):
'''
Populate a *metamodel* with class unique identifiers previously
encountered from input.
'''
for stmt in self.statements:
if isinstance(stmt, CreateUniqueStmt):
metamodel.define_unique_identifier(stmt.kind, stmt.name,
*stmt.attributes) | 0.007353 |
def start_search(self):
"""
Start the Gateway Search Request and return the address information
:rtype: (string,int)
:return: a tuple (IP string, port int) when found, or None when a
timeout occurs
"""
self._asyncio_loop = asyncio.get_event_loop()
# Creating Broadcast Receiver
coroutine_listen = self._asyncio_loop.create_datagram_endpoint(
lambda: self.KNXSearchBroadcastReceiverProtocol(
self._process_response,
self._timeout_handling,
self._timeout,
self._asyncio_loop
), local_addr=(self._broadcast_ip_address, 0)
)
self._listener_transport, listener_protocol = \
self._asyncio_loop.run_until_complete(coroutine_listen)
# We are ready to fire the broadcast message
coroutine_broadcaster = self._asyncio_loop.create_datagram_endpoint(
lambda: self.KNXSearchBroadcastProtocol(
self._asyncio_loop,
self._listener_transport.get_extra_info('sockname')
[1]),
remote_addr=(self._broadcast_address, self._broadcast_port))
self._broadcaster_transport, broadcast_protocol = \
self._asyncio_loop.run_until_complete(coroutine_broadcaster)
# Waiting for all Broadcast receive or timeout
self._asyncio_loop.run_forever()
# Got Response or Timeout
if self._resolved_gateway_ip_address is None and \
self._resolved_gateway_ip_port is None:
LOGGER.debug("Gateway not found!")
return None
else:
LOGGER.debug("Gateway found at %s:%s",
self._resolved_gateway_ip_address,
self._resolved_gateway_ip_port)
return self._resolved_gateway_ip_address, \
self._resolved_gateway_ip_port | 0.001036 |
def create_authentication_string(username, password):
'''
Creates an authentication string from the username and password.
:username: Username.
:password: Password.
:return: The encoded string.
'''
username_utf8 = username.encode('utf-8')
userpw_utf8 = password.encode('utf-8')
username_perc = quote(username_utf8)
userpw_perc = quote(userpw_utf8)
authinfostring = username_perc + ':' + userpw_perc
authinfostring_base64 = base64.b64encode(authinfostring.encode('utf-8')).decode('utf-8')
return authinfostring_base64 | 0.003503 |
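# Quick check of create_authentication_string: 'user' and 'pass' percent-encode
# to themselves, so the result is simply base64("user:pass").
print(create_authentication_string("user", "pass"))   # dXNlcjpwYXNz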
def clean_password(self, password, user=None):
"""
Validates a password. You can hook into this if you want to
restrict the allowed password choices.
"""
min_length = app_settings.PASSWORD_MIN_LENGTH
if min_length and len(password) < min_length:
raise forms.ValidationError(_("Password must be a minimum of {0} "
"characters.").format(min_length))
validate_password(password, user)
return password | 0.003891 |
def cql_encode_float(self, val):
"""
Encode floats using repr to preserve precision
"""
if math.isinf(val):
return 'Infinity' if val > 0 else '-Infinity'
elif math.isnan(val):
return 'NaN'
else:
return repr(val) | 0.00678 |
def unique_categories(categories):
"""Pass array-like categories, return sorted cleaned unique categories."""
categories = np.unique(categories)
categories = np.setdiff1d(categories, np.array(settings.categories_to_ignore))
categories = np.array(natsorted(categories, key=lambda v: v.upper()))
return categories | 0.006042 |
def handle(self, *args, **options):
"""
Transmit the courseware data for the EnterpriseCustomer(s) to the active integration channels.
"""
username = options['catalog_user']
# Before we do a whole bunch of database queries, make sure that the user we were passed exists.
try:
User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError('A user with the username {} was not found.'.format(username))
channels = self.get_integrated_channels(options)
for channel in channels:
channel_code = channel.channel_code()
channel_pk = channel.pk
transmit_content_metadata.delay(username, channel_code, channel_pk) | 0.006596 |
def hll_count(expr, error_rate=0.01, splitter=None):
"""
Calculate HyperLogLog count
:param expr:
:param error_rate: error rate
:type error_rate: float
:param splitter: the splitter to split the column value
:return: sequence or scalar
:Example:
>>> df = DataFrame(pd.DataFrame({'a': np.random.randint(100000, size=100000)}))
>>> df.a.hll_count()
63270
>>> df.a.nunique()
63250
"""
# to make the class pickled right by the cloudpickle
with open(os.path.join(path, 'lib', 'hll.py')) as hll_file:
local = {}
six.exec_(hll_file.read(), local)
HyperLogLog = local['HyperLogLog']
return expr.agg(HyperLogLog, rtype=types.int64, args=(error_rate, splitter)) | 0.003989 |
def mid_lvl_cmds_encode(self, target, hCommand, uCommand, rCommand):
'''
Mid Level commands sent from the GS to the autopilot. These are only
sent when being operated in mid-level commands mode
from the ground.
target : The system setting the commands (uint8_t)
hCommand : Commanded Altitude in meters (float)
uCommand : Commanded Airspeed in m/s (float)
rCommand : Commanded Turnrate in rad/s (float)
'''
return MAVLink_mid_lvl_cmds_message(target, hCommand, uCommand, rCommand) | 0.008439 |
def Mersmann_Kind_predictor(atoms, coeff=3.645, power=0.5,
covalent_radii=rcovs_Mersmann_Kind):
r'''Predicts the critical molar volume of a chemical based only on its
atomic composition according to [1]_ and [2]_. This is a crude approach,
but provides very reasonable
estimates in practice. Optionally, the `coeff` used and the `power` in the
fraction as well as the atomic contributions can be adjusted; this method
is general and atomic contributions can be regressed to predict other
properties with this routine.
.. math::
\frac{\left(\frac{V_c}{n_a N_A}\right)^{1/3}}{d_a}
= \frac{3.645}{\left(\frac{r_a}{r_H}\right)^{1/2}}
r_a = d_a/2
d_a = 2 \frac{\sum_i (n_i r_i)}{n_a}
In the above equations, :math:`n_i` is the number of atoms of species i in
the molecule, :math:`r_i` is the covalent atomic radius of the atom, and
:math:`n_a` is the total number of atoms in the molecule.
Parameters
----------
atoms : dict
Dictionary of atoms and their counts, [-]
coeff : float, optional
Coefficient used in the relationship, [m^2]
power : float, optional
Power applied to the relative atomic radius, [-]
covalent_radii : dict or indexable, optional
Object which can be indexed to atomic contributions (by symbol), [-]
Returns
-------
Vc : float
Predicted critical volume of the chemical, [m^3/mol]
Notes
-----
Using the :obj:`thermo.elements.periodic_table` covalent radii (from RDKit),
the coefficient and power should be 4.261206523632586 and 0.5597281770786228
respectively for best results.
Examples
--------
Prediction of critical volume of decane:
>>> Mersmann_Kind_predictor({'C': 10, 'H': 22})
0.0005851859052024729
This is compared against the experimental value, 0.000624 (a 6.2% relative
error)
Using custom fitted coefficients we can do a bit better:
>>> from thermo.critical import rcovs_regressed
>>> Mersmann_Kind_predictor({'C': 10, 'H': 22}, coeff=4.261206523632586,
... power=0.5597281770786228, covalent_radii=rcovs_regressed)
0.0005956871011923075
The relative error is only 4.5% now. This is compared to an experimental
uncertainty of 5.6%.
Evaluating 1321 critical volumes in the database, the average relative
error is 5.0%; standard deviation 6.8%; and worst value of 79% relative
error for phosphorus.
References
----------
.. [1] Mersmann, Alfons, and Matthias Kind. "Correlation for the Prediction
of Critical Molar Volume." Industrial & Engineering Chemistry Research,
October 16, 2017. https://doi.org/10.1021/acs.iecr.7b03171.
.. [2] Mersmann, Alfons, and Matthias Kind. "Prediction of Mechanical and
Thermal Properties of Pure Liquids, of Critical Data, and of Vapor
Pressure." Industrial & Engineering Chemistry Research, January 31,
2017. https://doi.org/10.1021/acs.iecr.6b04323.
'''
H_RADIUS_COV = covalent_radii['H']
tot = 0
atom_count = 0
for atom, count in atoms.items():
if atom not in covalent_radii:
            raise Exception('Atom %s is not supported by the supplied dictionary' % atom)
tot += count*covalent_radii[atom]
atom_count += count
da = 2.*tot/atom_count
ra = da/2.
da_SI = da*1e-10 # Convert from angstrom to m
return ((coeff/(ra/H_RADIUS_COV)**power)*da_SI)**3*N_A*atom_count | 0.007771 |
def register_cache_buster(self, app, config=None):
"""
Register `app` in cache buster so that `url_for` adds a unique prefix
to URLs generated for the `'static'` endpoint. Also make the app able
to serve cache-busted static files.
This allows setting long cache expiration values on static resources
because whenever the resource changes, so does its URL.
"""
if not (config is None or isinstance(config, dict)):
raise ValueError("`config` must be an instance of dict or None")
bust_map = {} # map from an unbusted filename to a busted one
unbust_map = {} # map from a busted filename to an unbusted one
# http://flask.pocoo.org/docs/0.12/api/#flask.Flask.static_folder
app.logger.debug('Starting computing hashes for static assets')
# compute (un)bust tables.
for dirpath, dirnames, filenames in os.walk(app.static_folder):
for filename in filenames:
# compute version component
rooted_filename = os.path.join(dirpath, filename)
if not self.__is_file_to_be_busted(rooted_filename):
continue
app.logger.debug(f'Computing hashes for {rooted_filename}')
with open(rooted_filename, 'rb') as f:
version = hashlib.md5(
f.read()
).hexdigest()[:self.hash_size]
# add version
unbusted = os.path.relpath(rooted_filename, app.static_folder)
# busted = os.path.join(version, unbusted)
busted = f"{unbusted}?q={version}"
# save computation to map
bust_map[unbusted] = busted
unbust_map[busted] = unbusted
        app.logger.debug('Finished computing hashes for static assets')
def bust_filename(file):
return bust_map.get(file, file)
def unbust_filename(file):
return unbust_map.get(file, file)
@app.url_defaults
def reverse_to_cache_busted_url(endpoint, values):
"""
Make `url_for` produce busted filenames when using the 'static'
endpoint.
"""
if endpoint == 'static':
values['filename'] = bust_filename(values['filename'])
def debusting_static_view(*args, **kwargs):
"""
Serve a request for a static file having a busted name.
"""
kwargs['filename'] = unbust_filename(kwargs.get('filename'))
return original_static_view(*args, **kwargs)
# Replace the default static file view with our debusting view.
original_static_view = app.view_functions['static']
app.view_functions['static'] = debusting_static_view | 0.001052 |
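For context, here is a minimal, self-contained sketch of the same cache-busting idea using only Flask's public `url_defaults` hook; the `STATIC_HASHES` table, the 8-character hash length, and the query-parameter approach are assumptions of this sketch, not the API of the class above.

import hashlib
import os

from flask import Flask

app = Flask(__name__)
STATIC_HASHES = {}  # illustrative table, not the original class's bust map

# Pre-compute a short content hash for every file under the static folder.
for dirpath, _, filenames in os.walk(app.static_folder):
    for name in filenames:
        path = os.path.join(dirpath, name)
        rel = os.path.relpath(path, app.static_folder)
        with open(path, 'rb') as fh:
            STATIC_HASHES[rel] = hashlib.md5(fh.read()).hexdigest()[:8]

@app.url_defaults
def add_static_version(endpoint, values):
    # Append ?q=<hash> to every static URL so the URL changes whenever the file does.
    if endpoint == 'static' and 'filename' in values:
        version = STATIC_HASHES.get(values['filename'])
        if version:
            values['q'] = version

Passing the hash as an extra `url_for` value means Flask's built-in static view still receives the original filename, so no debusting view is required in this variant.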
def clean_inputs(data):
"""Clean BED input files to avoid overlapping segments that cause downstream issues.
    Pre-merges inputs to avoid needing to merge multiple times during later parallel steps.
"""
if not utils.get_in(data, ("config", "algorithm", "variant_regions_orig")):
data["config"]["algorithm"]["variant_regions_orig"] = dd.get_variant_regions(data)
clean_vr = clean_file(dd.get_variant_regions(data), data, prefix="cleaned-")
merged_vr = merge_overlaps(clean_vr, data)
data["config"]["algorithm"]["variant_regions"] = clean_vr
data["config"]["algorithm"]["variant_regions_merged"] = merged_vr
if dd.get_coverage(data):
if not utils.get_in(data, ("config", "algorithm", "coverage_orig")):
data["config"]["algorithm"]["coverage_orig"] = dd.get_coverage(data)
clean_cov_bed = clean_file(dd.get_coverage(data), data, prefix="cov-", simple=True)
merged_cov_bed = merge_overlaps(clean_cov_bed, data)
data["config"]["algorithm"]["coverage"] = clean_cov_bed
data["config"]["algorithm"]["coverage_merged"] = merged_cov_bed
if 'seq2c' in get_svcallers(data):
seq2c_ready_bed = prep_seq2c_bed(data)
if not seq2c_ready_bed:
logger.warning("Can't run Seq2C without a svregions or variant_regions BED file")
else:
data["config"]["algorithm"]["seq2c_bed_ready"] = seq2c_ready_bed
elif regions.get_sv_bed(data):
dd.set_sv_regions(data, clean_file(regions.get_sv_bed(data), data, prefix="svregions-"))
return data | 0.005722 |
def bootstrap(**kwargs):
""" Bootstrap an EC2 instance that has been booted into an AMI from http://www.daemonology.net/freebsd-on-ec2/
    Note: deprecated; current AMI images are essentially pre-bootstrapped and only need to be configured.
"""
# the user for the image is `ec2-user`, there is no sudo, but we can su to root w/o password
original_host = env.host_string
env.host_string = 'ec2-user@%s' % env.instance.uid
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put('%s/authorized_keys' % bootstrap_files, '/tmp/authorized_keys')
put(join(bsdploy_path, 'enable_root_login_on_daemonology.sh'), '/tmp/', mode='0775')
run("""su root -c '/tmp/enable_root_login_on_daemonology.sh'""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
run('rm /tmp/enable_root_login_on_daemonology.sh')
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu = BootstrapUtils()
bu.ssh_keys = None
bu.upload_authorized_keys = False
bu.bootstrap_files_yaml = 'daemonology-files.yml'
bu.print_bootstrap_files()
bu.create_bootstrap_directories()
bu.upload_bootstrap_files({})
# we need to install python here, because there is no way to install it via
# ansible playbooks
bu.install_pkg('/', chroot=False, packages=['python27']) | 0.004237 |
def get_entries(self, start=0, end=0, data_request=None, steam_ids=None):
"""Get leaderboard entries.
:param start: start entry, not index (e.g. rank 1 is ``start=1``)
:type start: :class:`int`
:param end: end entry, not index (e.g. only one entry then ``start=1,end=1``)
:type end: :class:`int`
:param data_request: data being requested
:type data_request: :class:`steam.enums.common.ELeaderboardDataRequest`
:param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users`
        :type steam_ids: :class:`list`
:return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse``
:rtype: :class:`list`
:raises: :class:`LookupError` on message timeout or error
"""
message = MsgProto(EMsg.ClientLBSGetLBEntries)
message.body.app_id = self.app_id
message.body.leaderboard_id = self.id
message.body.range_start = start
message.body.range_end = end
message.body.leaderboard_data_request = self.data_request if data_request is None else data_request
if steam_ids:
message.body.steamids.extend(steam_ids)
resp = self._steam.send_job_and_wait(message, timeout=15)
if not resp:
raise LookupError("Didn't receive response within 15seconds :(")
if resp.eresult != EResult.OK:
raise LookupError(EResult(resp.eresult))
if resp.HasField('leaderboard_entry_count'):
self.entry_count = resp.leaderboard_entry_count
return resp.entries | 0.003147 |
def leaves(self, fragment_type=None):
"""
The current list of sync map fragments
which are (the values of) the leaves
of the sync map tree.
:rtype: list of :class:`~aeneas.syncmap.fragment.SyncMapFragment`
.. versionadded:: 1.7.0
"""
leaves = self.fragments_tree.vleaves_not_empty
if fragment_type is None:
return leaves
return [l for l in leaves if l.fragment_type == fragment_type] | 0.006289 |
def pull_all_rtl(configuration):
"""
Pulls all translations - reviewed or not - for RTL languages
"""
print("Pulling all translated RTL languages from transifex...")
for lang in configuration.rtl_langs:
print('rm -rf conf/locale/' + lang)
execute('rm -rf conf/locale/' + lang)
execute('tx pull -l ' + lang)
clean_translated_locales(configuration, langs=configuration.rtl_langs) | 0.002353 |
def get_bin(self):
"""Return the binary notation of the address/netmask."""
return _convert(self._ip_dec, notation=IP_BIN,
inotation=IP_DEC, _check=False, _isnm=self._isnm) | 0.009434 |
def find(self, other):
"""Return an interable of elements that overlap other in the tree."""
iset = self._iset
l = binsearch_left_start(iset, other[0] - self._maxlen, 0, len(iset))
r = binsearch_right_end(iset, other[1], 0, len(iset))
iopts = iset[l:r]
iiter = (s for s in iopts if s[0] <= other[1] and s[1] >= other[0])
for o in iiter: yield o | 0.01 |
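A small standalone illustration of the same overlap query using the standard `bisect` module in place of the `binsearch_*` helpers above (which are not shown here); the interval data and `max_len` bookkeeping are illustrative.

import bisect

# Illustrative data: intervals sorted by start; max_len bounds how far left of
# the query an overlapping interval's start can lie.
intervals = sorted([(1, 4), (3, 8), (10, 12), (11, 15)])
max_len = max(end - start for start, end in intervals)

def find_overlaps(query):
    q_start, q_end = query
    starts = [s for s, _ in intervals]
    lo = bisect.bisect_left(starts, q_start - max_len)
    hi = bisect.bisect_right(starts, q_end)
    # The slice is a superset of candidates; the filter keeps true overlaps.
    return [iv for iv in intervals[lo:hi] if iv[0] <= q_end and iv[1] >= q_start]

print(find_overlaps((2, 10)))  # [(1, 4), (3, 8), (10, 12)]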
def stop_server(self, datacenter_id, server_id):
"""
Stops the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
"""
response = self._perform_request(
url='/datacenters/%s/servers/%s/stop' % (
datacenter_id,
server_id),
method='POST-ACTION')
return response | 0.003802 |
def for_branch(self, branch):
"""
Return a new CourseLocator for another branch of the same library (also version agnostic)
"""
if self.org is None and branch is not None:
raise InvalidKeyError(self.__class__, "Branches must have full library ids not just versions")
return self.replace(branch=branch, version_guid=None) | 0.010753 |
def td_taper(out, start, end, beta=8, side='left'):
"""Applies a taper to the given TimeSeries.
A half-kaiser window is used for the roll-off.
Parameters
----------
out : TimeSeries
The ``TimeSeries`` to taper.
start : float
The time (in s) to start the taper window.
end : float
The time (in s) to end the taper window.
beta : int, optional
The beta parameter to use for the Kaiser window. See
``scipy.signal.kaiser`` for details. Default is 8.
side : {'left', 'right'}
The side to apply the taper to. If ``'left'`` (``'right'``), the taper
will roll up (down) between ``start`` and ``end``, with all values
before ``start`` (after ``end``) set to zero. Default is ``'left'``.
Returns
-------
TimeSeries
The tapered time series.
"""
out = out.copy()
width = end - start
winlen = 2 * int(width / out.delta_t)
window = Array(signal.get_window(('kaiser', beta), winlen))
xmin = int((start - out.start_time) / out.delta_t)
xmax = xmin + winlen//2
if side == 'left':
out[xmin:xmax] *= window[:winlen//2]
if xmin > 0:
out[:xmin].clear()
elif side == 'right':
out[xmin:xmax] *= window[winlen//2:]
if xmax < len(out):
out[xmax:].clear()
else:
raise ValueError("unrecognized side argument {}".format(side))
return out | 0.000694 |
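A rough standalone sketch of the half-Kaiser roll-on described above, using plain NumPy/SciPy arrays instead of the `TimeSeries`/`Array` types; the sample spacing, data, and window placement are illustrative values.

import numpy as np
from scipy import signal

delta_t = 1.0 / 256           # assumed sample spacing in seconds
data = np.ones(1024)          # stand-in for the time series values
start, end = 1.0, 2.0         # taper window in seconds, relative to t=0

winlen = 2 * int((end - start) / delta_t)
window = signal.get_window(('kaiser', 8), winlen)

xmin = int(start / delta_t)
xmax = xmin + winlen // 2
data[xmin:xmax] *= window[:winlen // 2]   # 'left' taper: roll up from zero
data[:xmin] = 0.0                         # everything before the taper is zeroed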
def get_array_shape(self, key):
"""Return array's shape"""
data = self.model.get_data()
return data[key].shape | 0.014599 |
def _report_completion(self):
"""Update shared counters to signal that we are done with this cluster.
        Call just before exiting the run() method (in a finally clause)."""
rem_clust = self.remaining_clusters
if rem_clust is not None:
# -= is non-atomic, need to acquire a lock
with self.remaining_clusters_lock:
rem_clust.value -= 1
# we do not need this object anymore
self.remaining_clusters = None
print("{} reported completion".format(self.name), file=self.log_fh) | 0.003515 |
def get_catalog_hierarchy_session(self, proxy):
"""Gets the catalog hierarchy traversal session.
arg: proxy (osid.proxy.Proxy): proxy
return: (osid.cataloging.CatalogHierarchySession) - a
``CatalogHierarchySession``
raise: NullArgument - ``proxy`` is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_catalog_hierarchy()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_catalog_hierarchy()`` is ``true``.*
"""
if not self.supports_catalog_hierarchy():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CatalogHierarchySession(proxy=proxy, runtime=self._runtime) | 0.003704 |
def var(inlist):
"""
Returns the variance of the values in the passed list using N-1
for the denominator (i.e., for estimating population variance).
Usage: lvar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = [0] * len(inlist)
for i in range(len(inlist)):
deviations[i] = inlist[i] - mn
return ss(deviations) / float(n - 1) | 0.00271 |
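As a quick sanity check of the N-1 estimator above (which relies on the module's `mean` and `ss` helpers, not shown here), the standard library reproduces the same value:

import statistics

# statistics.variance also divides by N-1, so it should agree with var().
data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(statistics.variance(data))   # 4.571428571428571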
def minkowski_distance(point1, point2, degree=2):
"""!
@brief Calculate Minkowski distance between two vectors.
\f[
dist(a, b) = \sqrt[p]{ \sum_{i=0}^{N}\left(a_{i} - b_{i}\right)^{p} };
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@param[in] degree (numeric): Degree of that is used for Minkowski distance.
@return (double) Minkowski distance between two vectors.
@see euclidean_distance
"""
distance = 0.0
for i in range(len(point1)):
distance += (point1[i] - point2[i]) ** degree
return distance ** (1.0 / degree) | 0.005997 |
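Usage sketch for the function above; note that for even `degree` the missing absolute value is harmless, while the textbook definition uses |a_i - b_i|. The NumPy comparison is an independent check, not part of the original API.

import numpy as np

# Illustrative vectors; with degree=2 this is the Euclidean distance.
a, b = [1.0, 2.0, 3.0], [4.0, 0.0, 3.0]
print(minkowski_distance(a, b))                  # 3.605551...
print(np.linalg.norm(np.subtract(a, b), ord=2))  # 3.605551..., same result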
def find_protein_complexes(model):
"""
    Find reactions that are catalyzed by at least one heterodimer.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Reactions whose gene-protein-reaction association contains at least one
logical AND combining different gene products (heterodimer).
"""
complexes = []
for rxn in model.reactions:
if not rxn.gene_reaction_rule:
continue
size = find_top_level_complex(rxn.gene_reaction_rule)
if size >= 2:
complexes.append(rxn)
return complexes | 0.001531 |
def make_draft(self):
"""
Make this version the draft
"""
assert self.__class__ == self.get_version_class()
# If this is draft do nothing
if self.state == self.DRAFT:
return
with xact():
# Delete whatever is currently this draft
try:
klass = self.get_version_class()
old_draft = klass.normal.get(object_id=self.object_id,
state=self.DRAFT)
old_draft.delete()
except klass.DoesNotExist:
pass
# Set this to draft and save
self.state = self.DRAFT
# Make last_scheduled and last save match on draft
self.last_save = self.last_scheduled
self._clone() | 0.002436 |
def from_file(cls, filename="CTRL", **kwargs):
"""
Creates a CTRL file object from an existing file.
Args:
filename: The name of the CTRL file. Defaults to 'CTRL'.
Returns:
An LMTOCtrl object.
"""
with zopen(filename, "rt") as f:
contents = f.read()
return LMTOCtrl.from_string(contents, **kwargs) | 0.005115 |
def initialize(self):
"""
Initialize the self._tm if not already initialized.
"""
if self._tm is None:
params = {
"columnCount": self.columnCount,
"basalInputSize": self.basalInputWidth,
"apicalInputSize": self.apicalInputWidth,
"cellsPerColumn": self.cellsPerColumn,
"activationThreshold": self.activationThreshold,
"initialPermanence": self.initialPermanence,
"connectedPermanence": self.connectedPermanence,
"minThreshold": self.minThreshold,
"sampleSize": self.sampleSize,
"permanenceIncrement": self.permanenceIncrement,
"permanenceDecrement": self.permanenceDecrement,
"basalPredictedSegmentDecrement": self.basalPredictedSegmentDecrement,
"apicalPredictedSegmentDecrement": self.apicalPredictedSegmentDecrement,
"maxSynapsesPerSegment": self.maxSynapsesPerSegment,
"seed": self.seed,
}
if self.implementation == "ApicalTiebreakCPP":
params["learnOnOneCell"] = self.learnOnOneCell
params["maxSegmentsPerCell"] = self.maxSegmentsPerCell
import htmresearch_core.experimental
cls = htmresearch_core.experimental.ApicalTiebreakPairMemory
elif self.implementation == "ApicalTiebreak":
params["reducedBasalThreshold"] = self.reducedBasalThreshold
import htmresearch.algorithms.apical_tiebreak_temporal_memory
cls = htmresearch.algorithms.apical_tiebreak_temporal_memory.ApicalTiebreakPairMemory
elif self.implementation == "ApicalDependent":
params["reducedBasalThreshold"] = self.reducedBasalThreshold
import htmresearch.algorithms.apical_dependent_temporal_memory
cls = htmresearch.algorithms.apical_dependent_temporal_memory.TripleMemory
else:
raise ValueError("Unrecognized implementation %s" % self.implementation)
self._tm = cls(**params) | 0.005732 |
def scheme(name, bins, bin_method='quantiles'):
"""Return a custom scheme based on CARTOColors.
Args:
name (str): Name of a CARTOColor.
bins (int or iterable): If an `int`, the number of bins for classifying
data. CARTOColors have 7 bins max for quantitative data, and 11 max
for qualitative data. If `bins` is a `list`, it is the upper range
for classifying data. E.g., `bins` can be of the form ``(10, 20, 30,
40, 50)``.
bin_method (str, optional): One of methods in :obj:`BinMethod`.
Defaults to ``quantiles``. If `bins` is an interable, then that is
the bin method that will be used and this will be ignored.
.. Warning::
Input types are particularly sensitive in this function, and little
feedback is given for errors. ``name`` and ``bin_method`` arguments
are case-sensitive.
"""
return {
'name': name,
'bins': bins,
'bin_method': (bin_method if isinstance(bins, int) else ''),
} | 0.00096 |
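A brief illustration of the returned structure; the palette name below is only a placeholder and is not asserted to exist in CARTOColors.

# Integer bins keep the requested bin_method; an explicit bin sequence clears it.
print(scheme('Sunset', bins=5))            # 'Sunset' is a placeholder name
# {'name': 'Sunset', 'bins': 5, 'bin_method': 'quantiles'}
print(scheme('Sunset', bins=(10, 20, 30)))
# {'name': 'Sunset', 'bins': (10, 20, 30), 'bin_method': ''}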
def classes_can_admin(self):
"""Return all the classes (sorted) that this user can admin."""
if self.is_admin:
return sorted(Session.query(Class).all())
else:
return sorted(self.admin_for) | 0.008475 |
def _load_embedding(self, pretrained_file_path, elem_delim,
encoding='utf8'):
"""Load embedding vectors from a pre-trained token embedding file.
Both text files and TokenEmbedding serialization files are supported.
elem_delim and encoding are ignored for non-text files.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
text embedding vector initialized by `self._init_unknown_vec`.
If a token is encountered multiple times in the pre-trained text embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
"""
pretrained_file_path = os.path.expanduser(pretrained_file_path)
if not os.path.isfile(pretrained_file_path):
raise ValueError('`pretrained_file_path` must be a valid path '
'to the pre-trained token embedding file.')
logging.info('Loading pre-trained token embedding vectors from %s',
pretrained_file_path)
if pretrained_file_path.endswith('.npz'):
self._load_embedding_serialized(
pretrained_file_path=pretrained_file_path)
else:
self._load_embedding_txt(
pretrained_file_path=pretrained_file_path,
elem_delim=elem_delim, encoding=encoding) | 0.004954 |
def html_factory(tag, **defaults):
'''Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla')
'''
def html_input(*children, **params):
p = defaults.copy()
p.update(params)
return Html(tag, *children, **p)
return html_input | 0.002315 |
def get_sign_key(exported_session_key, magic_constant):
"""
3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages
"""
sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
return sign_key | 0.006834 |
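A hedged usage sketch, assuming the function above is importable; the 16-byte session key is randomly generated here and the magic constant string is a placeholder for the value normally taken from `constants.SignSealConstants`.

import os

session_key = os.urandom(16)   # placeholder for the real exported session key
magic = b"session key to client-to-server signing key magic constant\x00"  # assumed value
print(get_sign_key(session_key, magic).hex())   # 16-byte MD5 digest as hex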
def _add_kwarg_datasets(datasets, kwargs):
"""Add data sets of the given kwargs.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
for test_method_suffix, dataset in six.iteritems(kwargs):
datasets[test_method_suffix] = dataset | 0.002392 |
async def send_script(self, conn_id, data):
        Send a script to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
data (bytes): the script to send to the device
"""
self._ensure_connection(conn_id, True)
connection_string = self._get_property(conn_id, "connection_string")
msg = dict(connection_string=connection_string, fragment_count=1, fragment_index=0,
script=base64.b64encode(data))
await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse) | 0.008078 |
def start(self, attempts=5, timeout=2):
"""
        Start the network, checking that it is active up to ``attempts``
        times and waiting ``timeout`` seconds between each attempt.
Args:
attempts (int): number of attempts to check the network is active
timeout (int): timeout for each attempt
Returns:
None
Raises:
RuntimeError: if network creation failed, or failed to verify it is
active.
"""
if not self.alive():
with LogTask('Create network %s' % self.name()):
net = self.libvirt_con.networkCreateXML(self._libvirt_xml())
if net is None:
raise RuntimeError(
'failed to create network, XML: %s' %
(self._libvirt_xml())
)
for _ in range(attempts):
if net.isActive():
return
LOGGER.debug(
'waiting for network %s to become active', net.name()
)
time.sleep(timeout)
raise RuntimeError(
'failed to verify network %s is active' % net.name()
) | 0.001566 |
def __model_class(self, model_name):
""" this method is used by the lru_cache, do not call directly """
build_schema = deepcopy(self.definitions[model_name])
return self.schema_class(build_schema, model_name) | 0.008621 |
def create_data_element_from_resource(self, resource):
"""
Returns a new data element for the given resource object.
:returns: object implementing :class:`IResourceDataElement`.
"""
mp = self.__mp_reg.find_or_create_mapping(type(resource))
return mp.data_element_class.create_from_resource(resource) | 0.005747 |
def reactions_to_files(model, dest, writer, split_subsystem):
"""Turn the reaction subsystems into their own files.
If a subsystem has a number of reactions over the threshold, it gets its
own YAML file. All other reactions, those that don't have a subsystem or
are in a subsystem that falls below the threshold, get added to a common
reaction file.
Args:
model: :class:`psamm_import.model.MetabolicModel`.
dest: output path for model files.
writer: :class:`psamm.datasource.native.ModelWriter`.
split_subsystem: Divide reactions into multiple files by subsystem.
"""
def safe_file_name(origin_name):
safe_name = re.sub(
r'\W+', '_', origin_name, flags=re.UNICODE)
safe_name = re.sub(
r'_+', '_', safe_name.lower(), flags=re.UNICODE)
safe_name = safe_name.strip('_')
return safe_name
common_reactions = []
reaction_files = []
if not split_subsystem:
common_reactions = sorted(model.reactions, key=lambda r: r.id)
if len(common_reactions) > 0:
reaction_file = 'reactions.yaml'
with open(os.path.join(dest, reaction_file), 'w') as f:
writer.write_reactions(f, common_reactions)
reaction_files.append(reaction_file)
else:
subsystems = {}
for reaction in sorted(model.reactions, key=lambda r: r.id):
if 'subsystem' in reaction.properties:
subsystem_file = safe_file_name(
reaction.properties['subsystem'])
subsystems.setdefault(subsystem_file, []).append(reaction)
else:
common_reactions.append(reaction)
subsystem_folder = 'reactions'
sub_existance = False
for subsystem_file, reactions in iteritems(subsystems):
if len(reactions) < _MAX_REACTION_COUNT:
for reaction in reactions:
common_reactions.append(reaction)
else:
if len(reactions) > 0:
mkdir_p(os.path.join(dest, subsystem_folder))
subsystem_file = os.path.join(
subsystem_folder, '{}.yaml'.format(subsystem_file))
with open(os.path.join(dest, subsystem_file), 'w') as f:
writer.write_reactions(f, reactions)
reaction_files.append(subsystem_file)
sub_existance = True
reaction_files.sort()
if sub_existance:
reaction_file = os.path.join(
subsystem_folder, 'other_reactions.yaml')
else:
reaction_file = 'reactions.yaml'
if len(common_reactions) > 0:
with open(os.path.join(dest, reaction_file), 'w') as f:
writer.write_reactions(f, common_reactions)
reaction_files.append(reaction_file)
return reaction_files | 0.00034 |
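A quick standalone look at what the nested `safe_file_name` helper above does to a subsystem name; the helper is reproduced here outside the function purely for illustration.

import re

def safe_file_name(origin_name):
    # Reproduced for illustration: non-word runs become a single underscore,
    # the result is lower-cased and stripped of leading/trailing underscores.
    safe_name = re.sub(r'\W+', '_', origin_name, flags=re.UNICODE)
    safe_name = re.sub(r'_+', '_', safe_name.lower(), flags=re.UNICODE)
    return safe_name.strip('_')

print(safe_file_name('Glycolysis / Gluconeogenesis'))  # glycolysis_gluconeogenesis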