text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def dtime_range(self, start=None, end=None, periods=None,
freq="1day", normalize=False, return_date=False):
"""A pure Python implementation of pandas.date_range().
Given two of start, end, and periods, plus a freq, generate a series of
datetime objects.
:param start: Left bound for generating dates
:type start: str or datetime.datetime (default None)
:param end: Right bound for generating dates
:type end: str or datetime.datetime (default None)
:param periods: Number of date points. If None, must specify start
and end
:type periods: integer (default None)
:param freq: string, default '1day' (calendar daily)
Available modes are day, hour, min, sec
Frequency strings can have multiples. e.g.
'7day', '6hour', '5min', '4sec'
:type freq: string (default '1day' calendar daily)
:param normalize: If True, normalize the start/end dates to midnight
:type normalize: boolean (default False)
:param return_date: If True, return datetime.date objects instead of
datetime.datetime objects
:type return_date: boolean (default False)
:return: A list of datetime.datetime objects; an evenly sampled time
series.
Usage::
>>> from __future__ import print_function
>>> from weatherlab.lib.timelib.timewrapper import timewrapper
>>> for dt in timewrapper.dtime_range("2014-1-1", "2014-1-7"):
... print(dt)
2014-01-01 00:00:00
2014-01-02 00:00:00
2014-01-03 00:00:00
2014-01-04 00:00:00
2014-01-05 00:00:00
2014-01-06 00:00:00
2014-01-07 00:00:00
**Chinese documentation (translated)**
Generate an evenly spaced time series.
Provide any two of "start", "end", and "periods", together with a "freq",
to uniquely determine an evenly spaced time series. Supported frequency
strings are "7day", "6hour", "5min", "4sec" (the numbers may be changed).
"""
def normalize_datetime_to_midnight(dtime):
"""normalize a datetime %Y-%m-%d %H:%M:%S to %Y-%m-%d 00:00:00
"""
return datetime(dtime.year, dtime.month, dtime.day)
def not_normalize(dtime):
"""do not normalize
"""
return dtime
time_series = list()
# if two of start, end, or periods exist
if (bool(start) + bool(end) + bool(periods)) == 2:
if normalize:
converter = normalize_datetime_to_midnight
else:
converter = not_normalize
interval = self._freq_parser(freq)
if (bool(start) & bool(end)): # start and end
start = self.parse_datetime(start)
end = self.parse_datetime(end)
if start > end: # if start time later than end time, raise error
raise Exception("start time has to be eariler and equal "
"than end time")
start = start - interval
while 1:
start += interval
if start <= end:
time_series.append( converter(start) )
else:
break
elif (bool(start) & bool(periods)): # start and periods
start = self.parse_datetime(start)
start = start - interval
for _ in range(periods):
start += interval
time_series.append( converter(start) )
elif (bool(end) & bool(periods)): # end and periods
end = self.parse_datetime(end)
start = end - interval * periods
for _ in range(periods):
start += interval
time_series.append( converter(start) )
else:
raise Exception("Must specify two of start, end, or periods")
if return_date:
time_series = [i.date() for i in time_series]
return time_series | 0.010014 |
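The start/periods mode follows the same pattern; a hedged illustration, reusing the timewrapper import from the usage block above and the '6hour' frequency string listed in the docstring:
>>> for dt in timewrapper.dtime_range(start="2014-1-1", periods=3, freq="6hour"):
...     print(dt)
2014-01-01 00:00:00
2014-01-01 06:00:00
2014-01-01 12:00:00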
def get_decoder(encoding):
"""Creates encoder object for the given encoding.
:param encoding: desired output encoding protocol
:type encoding: Encoding
:return: corresponding IEncoder object
:rtype: IEncoder
"""
if encoding == Encoding.V1_THRIFT:
return _V1ThriftDecoder()
if encoding == Encoding.V1_JSON:
raise NotImplementedError(
'{} decoding not yet implemented'.format(encoding))
if encoding == Encoding.V2_JSON:
raise NotImplementedError(
'{} decoding not yet implemented'.format(encoding))
raise ZipkinError('Unknown encoding: {}'.format(encoding)) | 0.001553 |
def __normalize(self):
"""
Adjusts the values of the filters to be correct.
For example, if you set grade 'B' to True, then 'All'
should be set to False
"""
# Don't normalize if we're already normalizing or initializing
if self.__normalizing is True or self.__initialized is False:
return
self.__normalizing = True
self.__normalize_grades()
self.__normalize_progress()
self.__normalizing = False | 0.004032 |
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train) | 0.003049 |
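For reference, a framework-free sketch of the same metric, assuming square_loss(a, b) is the usual 0.5 * (a - b) ** 2 (an assumption; the exact definition is not shown above):
import numpy as np

def rmse_log_numpy(preds, labels):
    # Hypothetical NumPy equivalent: clip predictions to >= 1, then take the
    # root of the mean squared difference of the logarithms.
    clipped = np.clip(preds, 1, np.inf)
    return np.sqrt(np.mean((np.log(clipped) - np.log(labels)) ** 2))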
def on_touch_move(self, touch):
"""If I'm being dragged, move to follow the touch."""
if touch.grab_current is not self:
return False
self.center = touch.pos
return True | 0.00939 |
def dcounts(self):
"""
:return: a data frame with names and distinct counts and fractions for all columns in the database
"""
print("WARNING: Distinct value count for all tables can take a long time...", file=sys.stderr)
sys.stderr.flush()
data = []
for t in self.tables():
for c in t.columns():
data.append([t.name(), c.name(), c.dcount(), t.size(), c.dcount() / float(t.size())])
df = pd.DataFrame(data, columns=["table", "column", "distinct", "size", "fraction"])
return df | 0.010399 |
def column_width(tokens):
"""
Return a suitable column width to display one or more strings.
"""
get_len = tools.display_len if PY3 else len
lens = sorted(map(get_len, tokens or [])) or [0]
width = lens[-1]
# adjust for disproportionately long strings
if width >= 18:
most = lens[int(len(lens) * 0.9)]
if most < width + 6:
return most
return width | 0.002421 |
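A brief illustration of the long-string adjustment (a sketch; assumes plain len is in effect rather than the PY3 display-length helper):
# Short tokens: the width is simply the longest length.
# column_width(['id', 'name', 'created']) -> 7
# With one disproportionately long string among many short ones, the 90th
# percentile length wins, so the outlier does not blow up the column:
# column_width(['x' * 10] * 10 + ['y' * 30]) -> 10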
def scale_pixels(color, layer):
"""Scales the pixel to the virtual pixelmap."""
pixelmap = []
# Scaling the pixel offsets.
for pix_x in range(MAX_X + 1):
for pix_y in range(MAX_Y + 1):
# Horizontal pixels
y1 = pix_y * dotsize[0]
x1 = pix_x * dotsize[1]
# Vertical pixels
y2 = pix_y * dotsize[0] + (dotsize[0] - 1)
x2 = pix_x * dotsize[1] + (dotsize[1] - 1)
if (y1 <= MAX_Y) and (y2 <= MAX_Y):
if (x1 <= MAX_X) and (x2 <= MAX_X):
if (pix_x, pix_y) in layer:
pixelmap.append([(y1, x1), (y2, x2), color])
return pixelmap | 0.001443 |
def render(self, name, value, attrs=None, renderer=None):
"""
Render the placeholder field.
"""
other_instance_languages = None
if value and value != "-DUMMY-":
if get_parent_language_code(self.parent_object):
# Parent is a multilingual object, provide information
# for the copy dialog.
other_instance_languages = get_parent_active_language_choices(
self.parent_object, exclude_current=True)
context = {
'cp_plugin_list': list(self.plugins),
'placeholder_id': '',
'placeholder_slot': self.slot,
'other_instance_languages': other_instance_languages,
}
return mark_safe(render_to_string('admin/fluent_contents/placeholderfield/widget.html', context)) | 0.003563 |
def add_previous_name(self, name):
"""Add previous name.
Args:
:param name: previous name for the current author.
:type name: string
"""
self._ensure_field('name', {})
self.obj['name'].setdefault('previous_names', []).append(name) | 0.00678 |
def getMusicAlbumList(self, tagtype=0, startnum=0, pagingrow=100, dummy=51467):
"""Get music album list.
:param tagtype: ?
:return: ``metadata`` or ``False``
:metadata:
- u'album':u'Greatest Hits Coldplay',
- u'artist':u'Coldplay',
- u'href':u'/Coldplay - Clocks.mp3',
- u'musiccount':1,
- u'resourceno':12459548378,
- u'tagtype':1,
- u'thumbnailpath':u'N',
- u'totalpath':u'/'
"""
data = {'tagtype': tagtype,
'startnum': startnum,
'pagingrow': pagingrow,
'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
}
s, metadata = self.POST('getMusicAlbumList', data)
if s is True:
return metadata
else:
return False | 0.011931 |
def wait(self):
'''This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(), but, the child is
technically still alive until its output is read by the parent. '''
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
return self.exitstatus
self.exitstatus = os.WEXITSTATUS(status)
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status): # pragma: no cover
# You can't call wait() on a child process in the stopped state.
raise PtyProcessError('Called wait() on a stopped child ' +
'process. This is not supported. Is some other ' +
'process attempting job control with our child pid?')
return self.exitstatus | 0.002996 |
def findEndpoint(html):
"""Search the given html content for all <link /> elements
and return any discovered WebMention URL.
:param html: html content
:rtype: WebMention URL
"""
poss_rels = ['webmention', 'http://webmention.org', 'http://webmention.org/', 'https://webmention.org', 'https://webmention.org/']
# find elements with correct rels and a href value
all_links = BeautifulSoup(html, _html_parser).find_all(rel=poss_rels, href=True)
for link in all_links:
s = link.get('href', None)
if s is not None:
return s
return None | 0.005 |
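A small usage sketch (assumes BeautifulSoup is importable and _html_parser is a parser name such as 'html.parser'):
html = ('<html><head>'
        '<link rel="webmention" href="https://example.com/webmention">'
        '</head><body>hello</body></html>')
# findEndpoint(html) would return 'https://example.com/webmention'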
def _count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length):
"""Count number of times subtokens appear, and generate new subtokens.
Args:
token_counts: dict mapping tokens to the number of times they appear in the
original files.
alphabet: list of allowed characters. Used to escape the tokens, which
guarantees that all tokens can be split into subtokens.
subtoken_dict: dict mapping subtokens to ids.
max_subtoken_length: maximum length of subtoken in subtoken_dict.
Returns:
A defaultdict mapping subtokens to the number of times they appear in the
tokens. The dict may contain new subtokens.
"""
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
token = _escape_token(token, alphabet)
subtokens = _split_token_to_subtokens(
token, subtoken_dict, max_subtoken_length)
# Generate new subtokens by taking substrings from token.
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(token) + 1):
new_subtoken = token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
return subtoken_counts | 0.006568 |
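A self-contained mirror of the inner counting loop, with token escaping omitted for brevity (the names here are illustrative, not from the original module):
import collections

def substring_counts(token, subtokens, count):
    # Every substring of `token` that starts at a current subtoken boundary
    # is credited with `count` occurrences, exactly as in the loop above.
    counts = collections.defaultdict(int)
    start = 0
    for subtoken in subtokens:
        for end in range(start + 1, len(token) + 1):
            counts[token[start:end]] += count
        start += len(subtoken)
    return counts

# A token "abc" seen 5 times and currently split as ["ab", "c"] yields
# {'a': 5, 'ab': 5, 'abc': 5, 'c': 5}
print(dict(substring_counts("abc", ["ab", "c"], 5)))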
def partition_key(self):
"""
The partition key of the event data object.
:rtype: bytes
"""
try:
return self._annotations[self._partition_key]
except KeyError:
return self._annotations.get(EventData.PROP_PARTITION_KEY, None) | 0.006757 |
async def _process_latching(self, key, latching_entry):
"""
This is a private utility method.
This method processes latching events and either returns them via
callback or stores them in the latch map.
:param key: Encoded pin
:param latching_entry: a latch table entry
:returns: Callback or store data in latch map
"""
if latching_entry[Constants.LATCH_CALLBACK]:
# auto clear entry and execute the callback
if latching_entry[Constants.LATCH_CALLBACK_TYPE]:
await latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
# noinspection PyPep8
else:
latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
self.latch_map[key] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[Constants.LATCH_STATE] = \
Constants.LATCH_LATCHED
updated_latch_entry[Constants.LATCHED_DATA] = \
latching_entry[Constants.LATCHED_DATA]
# time stamp it
updated_latch_entry[Constants.LATCHED_TIME_STAMP] = time.time()
self.latch_map[key] = updated_latch_entry | 0.004342 |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Option(key)
if key not in Option._member_map_:
extend_enum(Option, key, default)
return Option[key] | 0.007813 |
def get_response_example(self, opt_name, var_type, opt_values):
'''
Depending on the type of the variable, return a string with an example.
:param opt_name: --option name
:type opt_name: str,unicode
:param var_type: --type of variable
:type var_type: str, unicode
:param opt_values: --dictionary with properties of this variable
:type opt_values: dict
:return: example for `var_type` variable
:rtype: str, unicode
'''
if opt_name == 'previous' and var_type == 'uri':
result = None
elif var_type == 'uri':
params = {i.group(0): 1 for i in self.find_param.finditer(self.current_path)}
result = self.type_dict[var_type].format(self.current_path.format(**params))
if opt_name == 'next':
result += '?limit=1&offset=1'
elif opt_name == 'count' and var_type == 'integer':
result = 2
elif var_type == 'array':
items = opt_values.get('items', dict()).get('$ref', None)
item = 'array_example'
if items:
item = self.get_object_example(items.split('/')[-1])
result = [item]
elif var_type == 'autocomplete':
result = opt_values.get('enum', list())[0]
elif var_type in [None, 'object']:
def_name = opt_values.get('$ref').split('/')[-1]
result = self.get_object_example(def_name)
elif var_type == 'select2':
def_name = opt_values['additionalProperties']['model']['$ref'].split('/')[-1]
value_field_name = opt_values['additionalProperties']['value_field']
def_model = self.definitions[def_name].get('properties')
value_field = def_model.get(value_field_name, None)
var_type = value_field.get('format', None) or value_field.get('type', None)
result = self.get_response_example(opt_name, var_type, def_model)
else:
var_type = var_type.replace('-', '_')
result = opt_values.get('default', None) or self.type_dict[var_type]
return result | 0.004221 |
def _full_shape_filter(t: List, shapes: List) -> bool:
"""
Shape filter
Args:
t: List, list of tokens
shapes: List
Returns: bool
"""
if shapes:
for a_token in t:
if a_token._.full_shape not in shapes:
return False
return True | 0.005618 |
def parse_header(recipe, header="from", remove_header=True):
'''Take a recipe and return the complete header line. If
remove_header is True, only return the value.
Parameters
==========
recipe: the recipe file
header: the header key to find and parse
remove_header: if True, remove the key and return only the value
'''
parsed_header = None
fromline = [x for x in recipe.split('\n') if "%s:" %header in x.lower()]
# Case 1: We did not find the fromline
if len(fromline) == 0:
return ""
# Case 2: We found it!
if len(fromline) > 0:
fromline = fromline[0]
parsed_header = fromline.strip()
# Does the user want to clean it up?
if remove_header is True:
parsed_header = fromline.split(':', 1)[-1].strip()
return parsed_header | 0.002436 |
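A hedged usage sketch with a hypothetical Singularity-style recipe string:
recipe = "Bootstrap: docker\nFrom: ubuntu:16.04\n%post\napt-get update"
print(parse_header(recipe, header="from", remove_header=False))  # -> From: ubuntu:16.04
print(parse_header(recipe, header="from"))                       # -> ubuntu:16.04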
def EXTRA_LOGGING(self):
"""
List of modules to log, each with its own logging level.
For example:
[Logs]
EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
"""
input_text = get('EXTRA_LOGGING', '')
if input_text:
modules = input_text.split(',')
modules = [x.split(':') for x in modules]
else:
modules = []
return modules | 0.003929 |
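For clarity, the parsing above reduces to the following (note that surrounding whitespace is not stripped from the module names):
input_text = 'oscar.paypal:DEBUG, django.db:INFO'
modules = [x.split(':') for x in input_text.split(',')]
print(modules)  # [['oscar.paypal', 'DEBUG'], [' django.db', 'INFO']]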
def as_bin(self, *args, **kwargs):
"""Returns a binary blob containing the streamed transaction.
For information about the parameters, see :func:`Tx.stream <stream>`
:return: binary blob that would parse to the given transaction
"""
f = io.BytesIO()
self.stream(f, *args, **kwargs)
return f.getvalue() | 0.005571 |
def filter_not_empty_values(value):
"""Returns a list of non empty values or None"""
if not value:
return None
data = [x for x in value if x]
if not data:
return None
return data | 0.004673 |
def event(self, interface_id, address, value_key, value):
"""If a device emits some sort event, we will handle it here."""
LOG.debug("RPCFunctions.event: interface_id = %s, address = %s, value_key = %s, value = %s" % (
interface_id, address, value_key.upper(), str(value)))
self.devices_all[interface_id.split(
'-')[-1]][address].event(interface_id, value_key.upper(), value)
if self.eventcallback:
self.eventcallback(interface_id=interface_id, address=address,
value_key=value_key.upper(), value=value)
return True | 0.004823 |
def _format_variables(self, raw_vars):
"""
:param raw_vars: a `dict` of `var_name: var_object` pairs
:type raw_vars: dict
:return: sorted list of variables as a unicode string
:rtype: unicode
"""
f_vars = []
for var, value in raw_vars.items():
if not (var.startswith('__') and var.endswith('__')):
repr_value = self._get_repr(value)
f_vars.append('{0} = {1}'.format(var, repr_value))
return '\n'.join(sorted(f_vars)) | 0.003759 |
def _rule_option(self):
""" Parses the production rule::
option : NAME value ';'
Returns list (name, value_list).
"""
name = self._get_token(self.RE_NAME)
value = self._rule_value()
self._expect_token(';')
return [name, value] | 0.006494 |
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
chunk_size (int): The generator will return up to that much data
per iteration, but may return less. If ``None``, data will be
streamed as it is received. Default: 2 MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
if isinstance(named, six.string_types):
if named not in self.tags:
raise InvalidArgument(
"{} is not a valid tag for this image".format(named)
)
img = named
return self.client.api.get_image(img, chunk_size) | 0.001201 |
def multi_plot_time(DataArray, SubSampleN=1, units='s', xlim=None, ylim=None, LabelArray=[], show_fig=True):
"""
plot the time trace for multiple data sets on the same axes.
Parameters
----------
DataArray : array-like
array of DataObject instances for which to plot the PSDs
SubSampleN : int, optional
Number of intervals between points to remove (to sub-sample the data
so that you effectively have a lower sample rate, making plotting
easier and quicker).
units : string, optional
Unit of time for the time data, e.g. 's' for seconds (default 's')
xlim : array-like, optional
2 element array specifying the lower and upper x limit for which to
plot the time signal
ylim : array-like, optional
2 element array specifying the lower and upper y limit for which to
plot the time signal
LabelArray : array-like, optional
array of labels for each data-set to be plotted
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The axes object created
"""
unit_prefix = units[:-1] # removed the last char
if LabelArray == []:
LabelArray = ["DataSet {}".format(i)
for i in _np.arange(0, len(DataArray), 1)]
fig = _plt.figure(figsize=properties['default_fig_size'])
ax = fig.add_subplot(111)
for i, data in enumerate(DataArray):
ax.plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix), data.voltage[::SubSampleN],
alpha=0.8, label=LabelArray[i])
ax.set_xlabel("time (s)")
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
ax.grid(which="major")
legend = ax.legend(loc="best", frameon = 1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('white')
ax.set_ylabel("voltage (V)")
if show_fig:
_plt.show()
return fig, ax | 0.005547 |
def LSTM(nO, nI):
"""Create an LSTM layer. Args: number out, number in"""
weights = LSTM_weights(nO, nI)
gates = LSTM_gates(weights.ops)
return Recurrent(RNN_step(weights, gates)) | 0.005128 |
def create_keyword_file(self, algorithm):
"""Create keyword file for the raster file created.
Basically copy a template from keyword file in converter data
and add extra keyword (usually a title)
:param algorithm: Which re-sampling algorithm to use.
valid options are 'nearest' (for nearest neighbour), 'invdist'
(for inverse distance), 'average' (for moving average). Defaults
to 'nearest' if not specified. Note that passing re-sampling alg
parameters is currently not supported. If None is passed it will
be replaced with 'nearest'.
:type algorithm: str
"""
keyword_io = KeywordIO()
# Set thresholds for each exposure
mmi_default_classes = default_classification_thresholds(
earthquake_mmi_scale
)
mmi_default_threshold = {
earthquake_mmi_scale['key']: {
'active': True,
'classes': mmi_default_classes
}
}
generic_default_classes = default_classification_thresholds(
generic_hazard_classes
)
generic_default_threshold = {
generic_hazard_classes['key']: {
'active': True,
'classes': generic_default_classes
}
}
threshold_keyword = {}
for exposure in exposure_all:
# Not all exposure is supported by earthquake_mmi_scale
if exposure in earthquake_mmi_scale['exposures']:
threshold_keyword[exposure['key']] = mmi_default_threshold
else:
threshold_keyword[
exposure['key']] = generic_default_threshold
extra_keywords = {
extra_keyword_earthquake_latitude['key']: self.latitude,
extra_keyword_earthquake_longitude['key']: self.longitude,
extra_keyword_earthquake_magnitude['key']: self.magnitude,
extra_keyword_earthquake_depth['key']: self.depth,
extra_keyword_earthquake_description['key']: self.description,
extra_keyword_earthquake_location['key']: self.location,
extra_keyword_earthquake_event_time['key']: self.time.strftime(
'%Y-%m-%dT%H:%M:%S'),
extra_keyword_time_zone['key']: self.time_zone,
extra_keyword_earthquake_x_minimum['key']: self.x_minimum,
extra_keyword_earthquake_x_maximum['key']: self.x_maximum,
extra_keyword_earthquake_y_minimum['key']: self.y_minimum,
extra_keyword_earthquake_y_maximum['key']: self.y_maximum,
extra_keyword_earthquake_event_id['key']: self.event_id
}
for key, value in list(self.extra_keywords.items()):
extra_keywords[key] = value
# Delete empty element.
empty_keys = []
for key, value in list(extra_keywords.items()):
if value is None:
empty_keys.append(key)
for empty_key in empty_keys:
extra_keywords.pop(empty_key)
keywords = {
'hazard': hazard_earthquake['key'],
'hazard_category': hazard_category_single_event['key'],
'keyword_version': inasafe_keyword_version,
'layer_geometry': layer_geometry_raster['key'],
'layer_mode': layer_mode_continuous['key'],
'layer_purpose': layer_purpose_hazard['key'],
'continuous_hazard_unit': unit_mmi['key'],
'classification': earthquake_mmi_scale['key'],
'thresholds': threshold_keyword,
'extra_keywords': extra_keywords,
'active_band': 1
}
if self.algorithm_name:
layer_path = os.path.join(
self.output_dir, '%s-%s.tif' % (
self.output_basename, algorithm))
else:
layer_path = os.path.join(
self.output_dir, '%s.tif' % self.output_basename)
# append title and source to the keywords file
if len(self.title.strip()) == 0:
keyword_title = self.output_basename
else:
keyword_title = self.title
keywords['title'] = keyword_title
hazard_layer = QgsRasterLayer(layer_path, keyword_title)
if not hazard_layer.isValid():
raise InvalidLayerError()
keyword_io.write_keywords(hazard_layer, keywords) | 0.000452 |
def dir_exists(directory):
"""
If a directory already exists that will be overwritten by some action, this
will ask the user whether or not to continue with the deletion.
If the user responds affirmatively, then the directory will be removed. If
the user responds negatively, then the process will abort.
"""
log.info('Directory exists! Asking the user')
reply = input('''The directory {0} already exists.
It will be overwritten if the operation continues.
Replace? [Y/n]'''.format(directory))
if reply.lower() in ['y', 'yes', '']:
shutil.rmtree(directory)
os.makedirs(directory)
else:
log.critical('Aborting process, user declined overwriting {0}'.format(directory))
sys.exit('Aborting process!') | 0.002597 |
def add_item(self, query_params=None):
'''
Add an item to this checklist. Returns a dictionary of values of new
item.
'''
return self.fetch_json(
uri_path=self.base_uri + '/checkItems',
http_method='POST',
query_params=query_params or {}
) | 0.006192 |
def screenshot(self, png_filename=None, format='raw'):
"""
Screenshot with PNG format
Args:
png_filename(string): optional, save file name
format(string): return format, 'pillow' or 'raw' (default)
Returns:
raw data or PIL.Image
Raises:
WDAError
"""
value = self.http.get('screenshot').value
raw_value = base64.b64decode(value)
png_header = b"\x89PNG\r\n\x1a\n"
if not raw_value.startswith(png_header) and png_filename:
raise WDAError(-1, "screenshot png format error")
if png_filename:
with open(png_filename, 'wb') as f:
f.write(raw_value)
if format == 'raw':
return raw_value
elif format == 'pillow':
from PIL import Image
buff = io.BytesIO(raw_value)
return Image.open(buff)
else:
raise ValueError("unknown format") | 0.003043 |
def get_metric(self, reset: bool) -> Union[float, Tuple[float, ...], Dict[str, float], Dict[str, List[float]]]:
"""
Compute and return the metric. Optionally also call :func:`self.reset`.
"""
raise NotImplementedError | 0.012048 |
def load_contents(self):
"""
Loads contents of the Database from the file "<self.name>.csv".
"""
with open(self.name + ".csv") as f:
list_of_rows = f.readlines()
list_of_rows = map(
lambda x: x.strip(),
map(
lambda x: x.replace("\"", ""),
list_of_rows
)
)
for row in list_of_rows:
self.put_row(make_row(self.columns, row.split(','))) | 0.004167 |
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
"""Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
working_dir: string
working directory in which the chimera-free output file is written
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name
"""
logger = logging.getLogger(__name__)
logger.info('remove_chimeras_denovo_from_seqs seqs file %s '
'to working dir %s' % (seqs_fp, working_dir))
output_fp = join(
working_dir, "%s.no_chimeras" % basename(seqs_fp))
# we use the parameters dn=0.000001, xn=1000, minh=10000000
# so 1 mismatch in the A/B region will cancel it being labeled as chimera
# and ~3 unique reads in each region will make it a chimera if
# no mismatches
params = ['vsearch', '--uchime_denovo', seqs_fp,
'--nonchimeras', output_fp,
'-dn', '0.000001', '-xn', '1000',
'-minh', '10000000', '--mindiffs', '5',
'--fasta_width', '0', '--threads', str(threads)]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('problem with chimera removal for file %s' % seqs_fp)
logger.debug('stdout : %s' % sout)
logger.debug('stderr : %s' % serr)
return output_fp | 0.000692 |
def get_languages(self):
"""
Return a list of all used languages for this page.
"""
if self._languages:
return self._languages
self._languages = cache.get(self.PAGE_LANGUAGES_KEY % (self.id))
if self._languages is not None:
return self._languages
languages = [c['language'] for
c in Content.objects.filter(page=self,
type="slug").values('language')]
# remove duplicates
languages = list(set(languages))
languages.sort()
cache.set(self.PAGE_LANGUAGES_KEY % (self.id), languages)
self._languages = languages
return languages | 0.005658 |
def compile(code: list, consts: list, names: list, varnames: list,
func_name: str = "<unknown, compiled>",
arg_count: int = 0, kwarg_defaults: Tuple[Any] = (), use_safety_wrapper: bool = True):
"""
Compiles a set of bytecode instructions into a working function, using Python's bytecode
compiler.
:param code: A list of bytecode instructions.
:param consts: A list of constants to compile into the function.
:param names: A list of names to compile into the function.
:param varnames: A list of ``varnames`` to compile into the function.
:param func_name: The name of the function to use.
:param arg_count: The number of arguments this function takes. Must be ``<= len(varnames)``.
:param kwarg_defaults: A tuple of defaults for kwargs.
:param use_safety_wrapper: Use the safety wrapper? This hijacks SystemError to print better \
stack traces.
"""
varnames = tuple(varnames)
consts = tuple(consts)
names = tuple(names)
# Flatten the code list.
code = util.flatten(code)
if arg_count > len(varnames):
raise CompileError("arg_count > len(varnames)")
if len(kwarg_defaults) > len(varnames):
raise CompileError("len(kwarg_defaults) > len(varnames)")
# Compile it.
bc = compile_bytecode(code)
dis.dis(bc)
# Check for a final RETURN_VALUE.
if PY36:
# TODO: Add Python 3.6 check
pass
else:
if bc[-1] != tokens.RETURN_VALUE:
raise CompileError(
"No default RETURN_VALUE. Add a `pyte.tokens.RETURN_VALUE` to the end of your "
"bytecode if you don't need one.")
# Set default flags
flags = 1 | 2 | 64
frame_data = inspect.stack()[1]
if sys.version_info[0:2] > (3, 3):
# Validate the stack.
stack_size = _simulate_stack(dis._get_instructions_bytes(
bc, constants=consts, names=names, varnames=varnames)
)
else:
warnings.warn("Cannot check stack for safety.")
stack_size = 99
# Generate optimization warnings.
_optimize_warn_pass(dis._get_instructions_bytes(bc, constants=consts, names=names, varnames=varnames))
obb = types.CodeType(
arg_count, # Varnames - used for arguments.
0, # Kwargs are not supported yet
len(varnames), # co_nlocals -> Non-argument local variables
stack_size, # Auto-calculated
flags, # 67 is default for a normal function.
bc, # co_code - use the bytecode we generated.
consts, # co_consts
names, # co_names, used for global calls.
varnames, # arguments
frame_data[1], # use <unknown, compiled>
func_name, # co_name
frame_data[2], # co_firstlineno, ignore this.
b'', # https://svn.python.org/projects/python/trunk/Objects/lnotab_notes.txt
(), # freevars - no idea what this does
() # cellvars - used for nested functions - we don't use these.
)
# Update globals
f_globals = frame_data[0].f_globals
# Create a function type.
f = types.FunctionType(obb, f_globals)
f.__name__ = func_name
f.__defaults__ = kwarg_defaults
if use_safety_wrapper:
def __safety_wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except SystemError as e:
if 'opcode' not in ' '.join(e.args):
# Re-raise any non opcode related errors.
raise
msg = "Bytecode exception!" \
"\nFunction {} returned an invalid opcode." \
"\nFunction dissection:\n\n".format(f.__name__)
# dis sucks and always prints to stdout
# so we capture it
file = io.StringIO()
with contextlib.redirect_stdout(file):
dis.dis(f)
msg += file.getvalue()
raise SystemError(msg) from e
returned_func = __safety_wrapper
returned_func.wrapped = f
else:
returned_func = f
# return the func
return returned_func | 0.001922 |
def read(in_path):
""" Read a grp file at the path specified by in_path.
Args:
in_path (string): path to GRP file
Returns:
grp (list)
"""
assert os.path.exists(in_path), "The following GRP file can't be found. in_path: {}".format(in_path)
with open(in_path, "r") as f:
lines = f.readlines()
# need the second conditional to ignore comment lines
grp = [line.strip() for line in lines if line and not re.match("^#", line)]
return grp | 0.005952 |
def load_states():
'''
This loads our states into the salt __context__
'''
states = {}
# the loader expects to find pillar & grain data
__opts__['grains'] = salt.loader.grains(__opts__)
__opts__['pillar'] = __pillar__
lazy_utils = salt.loader.utils(__opts__)
lazy_funcs = salt.loader.minion_mods(__opts__, utils=lazy_utils)
lazy_serializers = salt.loader.serializers(__opts__)
lazy_states = salt.loader.states(__opts__,
lazy_funcs,
lazy_utils,
lazy_serializers)
# TODO: some way to lazily do this? This requires loading *all* state modules
for key, func in six.iteritems(lazy_states):
if '.' not in key:
continue
mod_name, func_name = key.split('.', 1)
if mod_name not in states:
states[mod_name] = {}
states[mod_name][func_name] = func
__context__['pyobjects_states'] = states | 0.005388 |
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator) | 0.003367 |
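A quick illustration of the type dispatch (a sketch; the sentinel is only returned for an empty iterable):
print(_mixed_join([b'ab', b'cd'], b''))  # -> b'abcd'
print(_mixed_join(['ab', 'cd'], u''))    # -> 'abcd'
print(_mixed_join([], u''))              # -> '' (the sentinel itself)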
def get_cmd_output_now(self, exe, suggest_filename=None,
root_symlink=False, timeout=300, stderr=True,
chroot=True, runat=None, env=None,
binary=False, sizelimit=None, pred=None):
"""Execute a command and save the output to a file for inclusion in the
report.
"""
if not self.test_predicate(cmd=True, pred=pred):
self._log_info("skipped cmd output '%s' due to predicate (%s)" %
(exe, self.get_predicate(cmd=True, pred=pred)))
return None
return self._get_cmd_output_now(exe, timeout=timeout, stderr=stderr,
chroot=chroot, runat=runat,
env=env, binary=binary,
sizelimit=sizelimit) | 0.005734 |
def http_basic_auth_superuser_required(func=None):
"""Decorator. Use it to specify a RPC method is available only to logged superusers"""
wrapper = auth.set_authentication_predicate(http_basic_auth_check_user, [auth.user_is_superuser])
# If @http_basic_auth_superuser_required() is used (with parenthesis)
if func is None:
return wrapper
# If @http_basic_auth_superuser_required is used without parenthesis
return wrapper(func) | 0.006494 |
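The with-or-without-parentheses behaviour noted in the comments follows a common pattern; a generic, hypothetical sketch of it:
import functools

def optional_parens(func=None):
    # Illustrative only: works both as @optional_parens and @optional_parens().
    def decorate(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            return f(*args, **kwargs)
        return inner
    if func is None:         # called with parentheses: @optional_parens()
        return decorate
    return decorate(func)    # used bare: @optional_parens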
def get_resource(self, path, params=None):
"""
O365 GET method. Return representation of the requested resource.
"""
url = '%s%s' % (path, self._param_list(params))
headers = {
'Accept': 'application/json;odata=minimalmetadata'
}
response = O365_DAO().getURL(self._url(url), headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return json_loads(response.data) | 0.003953 |
def dragEnterEvent(self, event):
"""Override Qt method"""
mimeData = event.mimeData()
formats = list(mimeData.formats())
if "parent-id" in formats and \
int(mimeData.data("parent-id")) == id(self.ancestor):
event.acceptProposedAction()
QTabBar.dragEnterEvent(self, event) | 0.008721 |
def get(property_name):
"""
Returns the value of the specified configuration property.
Property values stored in the user configuration file take
precedence over values stored in the system configuration
file.
:param property_name: The name of the property to retrieve.
:return: The value of the property.
"""
config = _read_config(_USER_CONFIG_FILE)
section = _MAIN_SECTION_NAME
try:
property_value = config.get(section, property_name)
except (NoOptionError, NoSectionError) as error:
# Try the system config file
try:
config = _read_config(_SYSTEM_CONFIG_FILE)
property_value = config.get(section, property_name)
except (NoOptionError, NoSectionError) as error:
raise NoConfigOptionError(error)
return property_value | 0.001189 |
def wait_process(self):
"""Wait for the process to finish"""
self.process.wait()
if self.analyze_data:
self.receiving_thread.join() | 0.011976 |
def onBinaryMessage(self, msg, fromClient):
data = bytearray()
data.extend(msg)
"""
self.print_debug("message length: {}".format(len(data)))
self.print_debug("message data: {}".format(hexlify(data)))
"""
try:
self.queue.put_nowait(data)
except asyncio.QueueFull:
pass | 0.041237 |
def one_vertical_total_stress(self, z_c):
"""
Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface
"""
total_stress = 0.0
depths = self.depths
end = 0
for layer_int in range(1, len(depths) + 1):
l_index = layer_int - 1
if z_c > depths[layer_int - 1]:
if l_index < len(depths) - 1 and z_c > depths[l_index + 1]:
height = depths[l_index + 1] - depths[l_index]
bottom_depth = depths[l_index + 1]
else:
end = 1
height = z_c - depths[l_index]
bottom_depth = z_c
if bottom_depth <= self.gwl:
total_stress += height * self.layer(layer_int).unit_dry_weight
else:
if self.layer(layer_int).unit_sat_weight is None:
raise AnalysisError("Saturated unit weight not defined for layer %i." % layer_int)
sat_height = bottom_depth - max(self.gwl, depths[l_index])
dry_height = height - sat_height
total_stress += dry_height * self.layer(layer_int).unit_dry_weight + \
sat_height * self.layer(layer_int).unit_sat_weight
else:
end = 1
if end:
break
return total_stress | 0.004781 |
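A stripped-down, hypothetical single-layer version of the same accumulation rule (dry unit weight above the ground-water level, saturated unit weight below it):
def total_stress_single_layer(z, gwl, unit_dry_weight, unit_sat_weight):
    # Depth z and water table gwl measured from the surface; weights in kN/m3.
    dry_height = min(z, gwl)
    sat_height = max(z - gwl, 0.0)
    return dry_height * unit_dry_weight + sat_height * unit_sat_weight

# e.g. z = 5 m, water table at 2 m, 17 kN/m3 dry, 20 kN/m3 saturated:
print(total_stress_single_layer(5.0, 2.0, 17.0, 20.0))  # -> 94.0 kPa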
def _get_memory_contents(self):
"""Runs the scheduler to determine memory contents at every point in time.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
GetAllOperationNames()).
"""
if self._memory_contents is not None:
return self._memory_contents
schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
self._memory_contents = self._graph.compute_memory_contents_under_schedule(
schedule)
return self._memory_contents | 0.003268 |
def get_find_executions_string(desc, has_children, single_result=False, show_outputs=True,
is_cached_result=False):
'''
:param desc: hash of execution's describe output
:param has_children: whether the execution has children to be printed
:param single_result: whether the execution is displayed as a single result or as part of an execution tree
:param is_cached_result: whether the execution should be formatted as a cached result
'''
is_not_subjob = desc['parentJob'] is None or desc['class'] == 'analysis' or single_result
result = ("* " if is_not_subjob and get_delimiter() is None else "")
canonical_execution_name = desc['executableName']
if desc['class'] == 'job':
canonical_execution_name += ":" + desc['function']
execution_name = desc.get('name', '<no name>')
# Format the name of the execution
if is_cached_result:
result += BOLD() + "[" + ENDC()
result += BOLD() + BLUE()
if desc['class'] == 'analysis':
result += UNDERLINE()
result += execution_name + ENDC()
if execution_name != canonical_execution_name and execution_name+":main" != canonical_execution_name:
result += ' (' + canonical_execution_name + ')'
if is_cached_result:
result += BOLD() + "]" + ENDC()
# Format state
result += DELIMITER(' (') + JOB_STATES(desc['state']) + DELIMITER(') ') + desc['id']
# Add unicode pipe to child if necessary
result += DELIMITER('\n' + (u'│ ' if is_not_subjob and has_children else (" " if is_not_subjob else "")))
result += desc['launchedBy'][5:] + DELIMITER(' ')
result += render_short_timestamp(desc['created'])
cached_and_runtime_strs = []
if is_cached_result:
cached_and_runtime_strs.append(YELLOW() + "cached" + ENDC())
if desc['class'] == 'job':
# Only print runtime if it ever started running
if desc.get('startedRunning'):
if desc['state'] in ['done', 'failed', 'terminated', 'waiting_on_output']:
runtime = datetime.timedelta(seconds=int(desc['stoppedRunning']-desc['startedRunning'])//1000)
cached_and_runtime_strs.append("runtime " + str(runtime))
elif desc['state'] == 'running':
seconds_running = max(int(time.time()-desc['startedRunning']//1000), 0)
msg = "running for {rt}".format(rt=datetime.timedelta(seconds=seconds_running))
cached_and_runtime_strs.append(msg)
if cached_and_runtime_strs:
result += " (" + ", ".join(cached_and_runtime_strs) + ")"
if show_outputs:
prefix = DELIMITER('\n' + (u'│ ' if is_not_subjob and has_children else (" " if is_not_subjob else "")))
if desc.get("output") != None:
result += job_output_to_str(desc['output'], prefix=prefix)
elif desc['state'] == 'failed' and 'failureReason' in desc:
result += prefix + BOLD() + desc['failureReason'] + ENDC() + ": " + fill(desc.get('failureMessage', ''),
subsequent_indent=prefix.lstrip('\n'))
return result | 0.005036 |
def K_separator_demister_York(P, horizontal=False):
r'''Calculates the Sounders Brown `K` factor as used in determining maximum
permissible gas velocity in a two-phase separator in either a horizontal or
vertical orientation, *with a demister*.
This function is a curve fit to [1]_ published in [2]_ and is widely used.
For 1 < P < 15 psia:
.. math::
K = 0.1821 + 0.0029P + 0.0460\ln P
For 15 <= P <= 40 psia:
.. math::
K = 0.35
For P < 5500 psia:
.. math::
K = 0.430 - 0.023\ln P
In the above equations, P is in units of psia.
Parameters
----------
P : float
Pressure of separator, [Pa]
horizontal : bool, optional
Whether to use the vertical or horizontal value; horizontal is 1.25
times higher, [-]
Returns
-------
K : float
Sounders Brown Horizontal or vertical `K` factor for two-phase
separator design with a demister, [m/s]
Notes
-----
If the input pressure is under 1 psia, 1 psia is used. If the
input pressure is over 5500 psia, 5500 psia is used.
Examples
--------
>>> K_separator_demister_York(975*psi)
0.08281536035331669
References
----------
.. [1] Svrcek, W. Y., and W. D. Monnery. "Design Two-Phase Separators
within the Right Limits" Chemical Engineering Progress, (October 1,
1993): 53-60.
.. [2] Otto H. York Company, "Mist Elimination in Gas Treatment Plants and
Refineries," Engineering, Parsippany, NJ.
'''
P = P/psi # Correlation in terms of psia
if P < 15:
if P < 1:
P = 1 # Prevent negative K values, but as a consequence be
# optimistic for K values; limit is 0.185 ft/s but real values
# should probably be lower
K = 0.1821 + 0.0029*P + 0.0460*log(P)
elif P < 40:
K = 0.35
else:
if P > 5500:
P = 5500 # Do not allow for lower K values above 5500 psia, as
# the limit is stated to be 5500
K = 0.430 - 0.023*log(P)
K *= foot # Converts units of ft/s to m/s; the graph and all fits are in ft/s
if horizontal:
# Watkins recommends a factor of 1.25 for horizontal separators over
# vertical separators as well
K *= 1.25
return K | 0.010947 |
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of trees with built-in support
:arg treeType: the name of the tree type required (case-insensitive). Supported
values are:
* "dom" - A generic builder for DOM implementations, defaulting to a
xml.dom.minidom based implementation.
* "etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to xml.etree.cElementTree if
available and xml.etree.ElementTree if not.
* "lxml" - A etree-based builder for lxml.etree, handling limitations
of lxml's implementation.
:arg implementation: (Currently applies to the "etree" and "dom" tree
types). A module implementing the tree type e.g. xml.etree.ElementTree
or xml.etree.cElementTree.
:arg kwargs: Any additional options to pass to the TreeBuilder when
creating it.
Example:
>>> from html5lib.treebuilders import getTreeBuilder
>>> builder = getTreeBuilder('etree')
"""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType) | 0.000937 |
def sparse_surface(self):
"""
Filled cells on the surface of the mesh.
Returns
----------------
voxels: (n, 3) int, filled cells on mesh surface
"""
if self._method == 'ray':
func = voxelize_ray
elif self._method == 'subdivide':
func = voxelize_subdivide
else:
raise ValueError('voxelization method incorrect')
voxels, origin = func(
mesh=self._data['mesh'],
pitch=self._data['pitch'],
max_iter=self._data['max_iter'][0])
self._cache['origin'] = origin
return voxels | 0.00314 |
def mode(self, predicate, args, recall=1, head=False):
'''
Emits mode declarations in Aleph-like format.
:param predicate: predicate name
:param args: predicate arguments with input/output specification, e.g.:
>>> [('+', 'train'), ('-', 'car')]
:param recall: recall setting (see `Aleph manual <http://www.cs.ox.ac.uk/activities/machinelearning/Aleph/aleph>`_)
:param head: set to True for head clauses
'''
return ':- mode%s(%s, %s(%s)).' % (
'h' if head else 'b', str(recall), predicate, ','.join([t + arg for t, arg in args])) | 0.007874 |
def _authority(scheme=DEFAULT_SCHEME, host=DEFAULT_HOST, port=DEFAULT_PORT):
"""Construct a URL authority from the given *scheme*, *host*, and *port*.
Named in accordance with RFC2396_, which defines URLs as::
<scheme>://<authority><path>?<query>
.. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt
So ``https://localhost:8000/a/b/c?boris=hilda`` would be parsed as::
scheme := https
authority := localhost:8000
path := /a/b/c
query := boris=hilda
:param scheme: URL scheme (the default is "https")
:type scheme: "http" or "https"
:param host: The host name (the default is "localhost")
:type host: string
:param port: The port number (the default is 8089)
:type port: integer
:return: The URL authority.
:rtype: UrlEncoded (subclass of ``str``)
**Example**::
_authority() == "https://localhost:8089"
_authority(host="splunk.utopia.net") == "https://splunk.utopia.net:8089"
_authority(host="2001:0db8:85a3:0000:0000:8a2e:0370:7334") == \
"https://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8089"
_authority(scheme="http", host="splunk.utopia.net", port="471") == \
"http://splunk.utopia.net:471"
"""
if ':' in host:
# IPv6 addresses must be enclosed in [ ] in order to be well
# formed.
host = '[' + host + ']'
return UrlEncoded("%s://%s:%s" % (scheme, host, port), skip_encode=True) | 0.001356 |
def encode_metadata_request(cls, client_id, correlation_id, topics=None,
payloads=None):
"""
Encode a MetadataRequest
Arguments:
client_id: string
correlation_id: int
topics: list of strings
"""
if payloads is None:
topics = [] if topics is None else topics
else:
topics = payloads
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.METADATA_KEY))
message.append(struct.pack('>i', len(topics)))
for topic in topics:
message.append(struct.pack('>h%ds' % len(topic), len(topic), topic))
msg = b''.join(message)
return write_int_string(msg) | 0.004779 |
def _credssp_processor(self, context):
"""
Implements the CredSSP negotiation as a generator-based state machine:
HTTP responses are sent in and authenticated requests are yielded back.
"""
http_response = (yield)
credssp_context = self._get_credssp_header(http_response)
if credssp_context is None:
raise Exception('The remote host did not respond with a \'www-authenticate\' header containing '
'\'CredSSP\' as an available authentication mechanism')
# 1. First, secure the channel with a TLS Handshake
if not credssp_context:
self.tls_connection = SSL.Connection(self.tls_context)
self.tls_connection.set_connect_state()
while True:
try:
self.tls_connection.do_handshake()
except SSL.WantReadError:
http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
credssp_context = self._get_credssp_header(http_response)
if credssp_context is None or not credssp_context:
raise Exception('The remote host rejected the CredSSP TLS handshake')
self.tls_connection.bio_write(credssp_context)
else:
break
# add logging to display the negotiated cipher (move to a function)
openssl_lib = _util.binding.lib
ffi = _util.binding.ffi
cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl)
cipher_name = ffi.string( openssl_lib.SSL_CIPHER_get_name(cipher))
log.debug("Negotiated TLS Cipher: %s", cipher_name)
# 2. Send an TSRequest containing an NTLM Negotiate Request
context_generator = context.initialize_security_context()
negotiate_token = context_generator.send(None)
log.debug("NTLM Type 1: %s", AsHex(negotiate_token))
ts_request = TSRequest()
ts_request['negoTokens'] = negotiate_token
self.tls_connection.send(ts_request.getData())
http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
# Extract and decrypt the encoded TSRequest response struct from the Negotiate header
authenticate_header = self._get_credssp_header(http_response)
if not authenticate_header or authenticate_header is None:
raise Exception("The remote host rejected the CredSSP negotiation token")
self.tls_connection.bio_write(authenticate_header)
# NTLM Challenge Response and Server Public Key Validation
ts_request = TSRequest()
ts_request.fromString(self.tls_connection.recv(8192))
challenge_token = ts_request['negoTokens']
log.debug("NTLM Type 2: %s", AsHex(challenge_token))
server_cert = self.tls_connection.get_peer_certificate()
# not using channel bindings
#certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', ''))
## channel_binding_structure = gss_channel_bindings_struct()
## channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest
public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert)
# The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with
# the final SPNEGO token. This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks
# Build and encrypt the response to the server
ts_request = TSRequest()
type3= context_generator.send(challenge_token)
log.debug("NTLM Type 3: %s", AsHex(type3))
ts_request['negoTokens'] = type3
public_key_encrypted, signature = context.wrap_message(public_key)
ts_request['pubKeyAuth'] = signature + public_key_encrypted
self.tls_connection.send(ts_request.getData())
enc_type3 = self.tls_connection.bio_read(8192)
http_response = yield self._set_credssp_header(http_response.request, enc_type3)
# TLS decrypt the response, then ASN decode and check the error code
auth_response = self._get_credssp_header(http_response)
if not auth_response or auth_response is None:
raise Exception("The remote host rejected the challenge response")
self.tls_connection.bio_write(auth_response)
ts_request = TSRequest()
ts_request.fromString(self.tls_connection.recv(8192))
# TODO: determine how to validate server certificate here
#a = ts_request['pubKeyAuth']
# print ":".join("{:02x}".format(ord(c)) for c in a)
# 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS
tsp = TSPasswordCreds()
tsp['domain'] = self.password_authenticator.get_domain()
tsp['username'] = self.password_authenticator.get_username()
tsp['password'] = self.password_authenticator.get_password()
tsc = TSCredentials()
tsc['type'] = 1
tsc['credentials'] = tsp.getData()
ts_request = TSRequest()
encrypted, signature = context.wrap_message(tsc.getData())
ts_request['authInfo'] = signature + encrypted
self.tls_connection.send(ts_request.getData())
token = self.tls_connection.bio_read(8192)
http_response.request.body = self.body
http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token)
if http_response.status_code == 401:
raise Exception('Authentication Failed') | 0.003918 |
def _report(self, action, key_mapper=mappers._report_key_mapper):
'''Return the dictionary of **kwargs with the correct datums attribute
names and data types for the top level of the report, and return the
nested levels separately.
'''
_top_level = [
k for k, v in self.report.items() if not isinstance(v, dict)]
_nested_level = [
k for k, v in self.report.items() if isinstance(v, dict)]
top_level_dict = {}
nested_levels_dict = {}
for key in _top_level:
try:
if key == 'date' or key == 'timestamp':
item = mappers._key_type_mapper[key](
str(self.report[key]), **{'ignoretz': True})
else:
item = mappers._key_type_mapper[key](str(
self.report[key]) if key != 'draft' else self.report[key])
except KeyError:
item = self.report[key]
finally:
try:
top_level_dict[key_mapper[key]] = item
except KeyError:
warnings.warn('''
{0} is not currently supported by datums and will be ignored.
Would you consider submitting an issue to add support?
https://www.github.com/thejunglejane/datums/issues
'''.format(key))
for key in _nested_level:
nested_levels_dict[key] = self.report[key]
# Add the parent report ID
nested_levels_dict[key][
'reportUniqueIdentifier'] = mappers._key_type_mapper[
'uniqueIdentifier'](str(self.report['uniqueIdentifier']))
if key == 'placemark':
# Add the parent location report UUID
nested_levels_dict[key][
'locationUniqueIdentifier'] = nested_levels_dict[key].pop(
'reportUniqueIdentifier')
# Create UUID for altitude report if there is not one and the action
# is get_or_create, else delete the altitude report from the nested
# levels and warn that it will not be updated
if 'uniqueIdentifier' not in nested_levels_dict[key]:
if action.__func__.func_name == 'get_or_create':
nested_levels_dict[key]['uniqueIdentifier'] = uuid.uuid4()
else:
del nested_levels_dict[key]
warnings.warn('''
No uniqueIdentifier found for AltitudeReport in {0}.
Existing altitude report will not be updated.
'''.format(self.report['uniqueIdentifier']))
return top_level_dict, nested_levels_dict | 0.001776 |
def grow(script, iterations=1):
""" Grow (dilate, expand) the current set of selected faces
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): the number of times to grow the selection.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ' <filter name="Dilate Selection"/>\n'
for _ in range(iterations):
util.write_filter(script, filter_xml)
return None | 0.001912 |
def vector_distance(a, b):
"""The Euclidean distance between two vectors."""
a = np.array(a)
b = np.array(b)
return np.linalg.norm(a - b) | 0.006536 |
def release(self, key: str) -> Optional[str]:
"""
Release the lock.
Noop if not locked.
:param key: the name of the lock to release
:return: the name of the released lock, or `None` if no lock was released
:raises InvalidKeyError: if the `key` is not valid
"""
ConsulLockManager.validate_key(key)
key_value = self.consul_client.kv.get(key)[1]
if key_value is None:
logger.info(f"No lock found")
return None
lock_information = json.loads(key_value["Value"].decode("utf-8"), cls=ConsulLockInformationJSONDecoder)
logger.info(f"Destroying the session {lock_information.session_id} that is holding the lock")
unlocked = self.consul_client.session.destroy(session_id=lock_information.session_id)
# This instance might be managing the removed session
try:
self._acquiring_session_ids.remove(lock_information.session_id)
except KeyError:
pass
logger.info("Unlocked" if unlocked else "Went to unlock but was already released upon sending request")
return key | 0.006108 |
def com_google_fonts_check_version_bump(ttFont,
api_gfonts_ttFont,
github_gfonts_ttFont):
"""Version number has increased since previous release on Google Fonts?"""
v_number = ttFont["head"].fontRevision
api_gfonts_v_number = api_gfonts_ttFont["head"].fontRevision
github_gfonts_v_number = github_gfonts_ttFont["head"].fontRevision
failed = False
if v_number == api_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is equal to"
" version on Google Fonts.").format(v_number)
if v_number < api_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is less than"
" version on Google Fonts ({})."
"").format(v_number,
api_gfonts_v_number)
if v_number == github_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is equal to"
" version on Google Fonts GitHub repo."
"").format(v_number)
if v_number < github_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is less than"
" version on Google Fonts GitHub repo ({})."
"").format(v_number,
github_gfonts_v_number)
if not failed:
yield PASS, ("Version number {} is greater than"
" version on Google Fonts GitHub ({})"
" and production servers ({})."
"").format(v_number,
github_gfonts_v_number,
api_gfonts_v_number) | 0.006757 |
def _generateChildren(self):
"""Generator which yields all AXChildren of the object."""
try:
children = self.AXChildren
except _a11y.Error:
return
if children:
for child in children:
yield child | 0.007194 |
def check_for_cores(self):
"""! @brief Init task: verify that at least one core was discovered."""
if not len(self.cores):
# Allow the user to override the exception to enable uses like chip bringup.
if self.session.options.get('allow_no_cores', False):
logging.error("No cores were discovered!")
else:
raise exceptions.DebugError("No cores were discovered!") | 0.006772 |
def check_params(self, *keys):
"""Ensure user has set required values in weather.ini.
Normally the :py:data:`~ServiceBase.config` names with
``required`` set are checked, but if your uploader has a
``register`` method you may need to check for other data.
:param str keys: the :py:data:`~ServiceBase.config` names to
verify.
"""
for key in keys:
if not self.params[key]:
raise RuntimeError('"{}" not set in weather.ini'.format(key)) | 0.003766 |
def to_json(value, pretty=False):
"""
Serializes the given value to JSON.
:param value: the value to serialize
:param pretty:
whether or not to format the output in a more human-readable way; if
not specified, defaults to ``False``
:type pretty: bool
:rtype: str
"""
options = {
'sort_keys': False,
'cls': BasicJSONEncoder,
}
if pretty:
options['indent'] = 2
options['separators'] = (',', ': ')
return json.dumps(value, **options) | 0.001905 |
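Example of the pretty option, assuming the custom encoder passes basic types through to json.dumps unchanged:
# to_json({'b': 1, 'a': [1, 2]}, pretty=True) would produce:
# {
#   "b": 1,
#   "a": [
#     1,
#     2
#   ]
# }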
def rmvsuffix(subject):
"""
Remove the suffix from *subject*.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
subject = subject[:index]
return subject | 0.025 |
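A few doctest-style cases showing the directory-aware behaviour:
print(rmvsuffix('src/module.py'))    # -> 'src/module'
print(rmvsuffix('archive.tar.gz'))   # -> 'archive.tar' (only the last suffix goes)
print(rmvsuffix('v1.2/readme'))      # -> 'v1.2/readme' (the only dot is in a directory name)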
def scale_and_shift_cmap(self, scale_pct, shift_pct):
"""Stretch and/or shrink the color map.
See :meth:`ginga.RGBMap.RGBMapper.scale_and_shift`.
"""
rgbmap = self.get_rgbmap()
rgbmap.scale_and_shift(scale_pct, shift_pct) | 0.007634 |
def set_boot_order(self, position, device):
"""Puts the given device to the specified position in
the boot order.
To indicate that no device is associated with the given position,
:py:attr:`DeviceType.null` should be used.
@todo setHardDiskBootOrder(), setNetworkBootOrder()
in position of type int
Position in the boot order (@c 1 to the total number of
devices the machine can boot from, as returned by
:py:func:`ISystemProperties.max_boot_position` ).
in device of type :class:`DeviceType`
The type of the device used to boot at the given position.
raises :class:`OleErrorInvalidarg`
Boot @a position out of range.
raises :class:`OleErrorNotimpl`
Booting from USB @a device currently not supported.
"""
if not isinstance(position, baseinteger):
raise TypeError("position can only be an instance of type baseinteger")
if not isinstance(device, DeviceType):
raise TypeError("device can only be an instance of type DeviceType")
self._call("setBootOrder",
in_p=[position, device]) | 0.007258 |
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ
""" POST request """
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)
msg = ""
error = False
data = web.input()
if not data.get("token", "") == self.user_manager.session_token():
msg = _("Operation aborted due to invalid token.")
error = True
elif "wipeall" in data:
if not data.get("courseid", "") == courseid:
msg = _("Wrong course id.")
error = True
else:
try:
self.dump_course(courseid)
msg = _("All course data have been deleted.")
except:
msg = _("An error occurred while dumping course from database.")
error = True
elif "restore" in data:
if "backupdate" not in data:
msg = "No backup date selected."
error = True
else:
try:
dt = datetime.datetime.strptime(data["backupdate"], "%Y%m%d.%H%M%S")
self.restore_course(courseid, data["backupdate"])
msg = _("Course restored to date : {}.").format(dt.strftime("%Y-%m-%d %H:%M:%S"))
except:
msg = _("An error occurred while restoring backup.")
error = True
elif "deleteall" in data:
if not data.get("courseid", "") == courseid:
msg = _("Wrong course id.")
error = True
else:
try:
self.delete_course(courseid)
web.seeother(self.app.get_homepath() + '/index')
except:
msg = _("An error occurred while deleting the course data.")
error = True
return self.page(course, msg, error) | 0.005115 |
def build(self, spec, reset=True):
'''
Compile the Stan model from an abstract model specification.
Args:
spec (Model): A bambi Model instance containing the abstract
specification of the model to compile.
reset (bool): if True (default), resets the StanBackEnd instance
before compiling.
'''
if reset:
self.reset()
n_cases = len(spec.y.data)
self.data.append('int<lower=1> N;')
self.X['N'] = n_cases
def _sanitize_name(name):
''' Stan only allows alphanumeric chars and underscore, and
variable names must begin with a letter. Additionally, Stan
reserves a few hundred strings that can't be used as variable
names. So to play it safe, we replace all invalid chars with '_',
and prepend all variables with 'b_'. We substitute the original
names back in later. '''
if name in self._original_names:
return name
            clean = 'b_' + re.sub(r'[^a-zA-Z0-9_]+', '_', name)
self._original_names[clean] = name
return clean
def _map_dist(dist, **kwargs):
''' Maps PyMC3 distribution names and attrs in the Prior object
to the corresponding Stan names and argument order. '''
if dist not in self.dists:
raise ValueError("There is no distribution named '%s' "
"in Stan." % dist)
stan_dist = self.dists[dist]
dist_name = stan_dist['name']
dist_args = stan_dist['args']
dist_bounds = stan_dist.get('bounds', '')
# Flat/HalfFlat/undefined priors are handled separately
if dist_name is None:
return None, dist_bounds
lookup_args = [a[1:] for a in dist_args if a.startswith('#')]
missing = set(lookup_args) - set(list(kwargs.keys()))
if missing:
raise ValueError("The following mandatory parameters of "
"the %s distribution are missing: %s."
% (dist, missing))
# Named arguments to take from the Prior object are denoted with
# a '#'; otherwise we take the value in the self.dists dict as-is.
dp = [kwargs[p[1:]] if p.startswith('#') else p for p in dist_args]
# Sometimes we get numpy arrays at this stage, so convert to float
dp = [float(p.ravel()[0]) if isinstance(p, np.ndarray) else p
for p in dp]
dist_term = '%s(%s)' % (dist_name, ', '.join([str(p) for p in dp]))
# handle Uniform variables, for which the bounds are the parameters
if dist_name=='uniform':
dist_bounds = dist_bounds.format(*dp)
return dist_term, dist_bounds
def _add_data(name, data, term):
''' Add all model components that directly touch or relate to data.
'''
if data.shape[1] == 1:
# For random effects, index into grouping variable
if n_cols > 1:
index_name = _sanitize_name('%s_grp_ind' % name)
self.data.append('int %s[N];' % index_name)
self.X[index_name] = t.group_index + 1 # 1-based indexing
predictor = 'vector[N] %s;'
else:
predictor = ('matrix[N, %d]' % (n_cols)) + ' %s;'
data_name = _sanitize_name('%s_data' % name)
var_name = _sanitize_name(name)
self.data.append(predictor % data_name)
self.X[data_name] = data.squeeze()
if data.shape[1] == 1 and n_cols > 1:
code = '%s[%s[n]] * %s[n]' % (var_name, index_name, data_name)
self.mu_cat.append(code)
else:
self.mu_cont.append('%s * %s' % (data_name, var_name))
def _add_parameters(name, dist_name, n_cols, **dist_args):
''' Add all model components related to latent parameters. We
handle these separately from the data components, as the parameters
can have nested specifications (in the case of random effects). '''
def _expand_args(k, v, name):
if isinstance(v, Prior):
name = _sanitize_name('%s_%s' % (name, k))
return _add_parameters(name, v.name, 1, **v.args)
return v
kwargs = {k: _expand_args(k, v, name)
for (k, v) in dist_args.items()}
_dist, _bounds = _map_dist(dist_name, **kwargs)
if n_cols == 1:
stan_par = 'real'
else:
stan_par = 'vector[%d]' % n_cols
var_name = _sanitize_name(name)
# non-centered parameterization
if spec.noncentered and 'sd' in kwargs and \
isinstance(kwargs['sd'], string_types):
offset_name = _sanitize_name(name + '_offset')
offset = 'vector[%d] %s;' % (n_cols, offset_name)
self.parameters.append(offset)
self.model.append('%s ~ normal(0, 1);' % offset_name)
self.transformed_parameters.append('%s%s %s;' % (stan_par,
_bounds,
var_name))
trans = '%s = multiply(%s, %s);' % (var_name, offset_name,
kwargs['sd'])
self.expressions.append(trans)
else:
self.parameters.append('%s%s %s;' % (stan_par, _bounds,
var_name))
if _dist is not None:
self.model.append('%s ~ %s;' % (var_name, _dist))
return name
for t in spec.terms.values():
data = t.data
label = t.name
dist_name = t.prior.name
dist_args = t.prior.args
n_cols = data.shape[1]
if t.random:
data = t.predictor
# Add to Stan model
_add_data(label, data, t)
_add_parameters(label, dist_name, n_cols, **dist_args)
# yhat
self.transformed_parameters.append('vector[N] yhat;')
if self.mu_cont:
yhat_cont = 'yhat = %s;' % ' + '.join(self.mu_cont)
self.expressions.append(yhat_cont)
else:
self.mu_cat.insert(0, '0')
if self.mu_cat:
loops = ('for (n in 1:N)\n\t\tyhat[n] = yhat[n] + %s'
% ' + '.join(self.mu_cat) + ';\n\t')
self.expressions.append(loops)
# Add expressions that go in transformed parameter block (they have
# to come after variable definitions)
self.transformed_parameters += self.expressions
# add response variable (y)
_response_format = self.families[spec.family.name]['format']
self.data.append('{} y{};'.format(*_response_format))
# add response distribution parameters other than the location
# parameter
for k, v in spec.family.prior.args.items():
if k != spec.family.parent and isinstance(v, Prior):
_bounds = _map_dist(v.name, **v.args)[1]
_param = 'real{} {}_{};'.format(_bounds, spec.y.name, k)
self.parameters.append(_param)
# specify the response distribution
_response_dist = self.families[spec.family.name]['name']
_response_args = '{}(yhat)'.format(self.links[spec.family.link])
_response_args = {spec.family.parent: _response_args}
for k, v in spec.family.prior.args.items():
if k != spec.family.parent:
_response_args[k] = '{}_{}'.format(spec.y.name, k) \
if isinstance(v, Prior) else str(v)
_dist = _map_dist(_response_dist, **_response_args)[0]
self.model.append('y ~ {};'.format(_dist))
# add the data
_response_type = self.families[spec.family.name]['type']
self.X['y'] = spec.y.data.astype(_response_type).squeeze()
# Construct the stan script
def format_block(name):
key = name.replace(' ', '_')
els = ''.join(['\t%s\n' % e for e in getattr(self, key)])
return '%s {\n%s}\n' % (name, els)
blocks = ['data', 'transformed data', 'parameters',
'transformed parameters', 'model']
self.model_code = ''.join([format_block(bl) for bl in blocks])
self.spec = spec
self.stan_model = ps.StanModel(model_code=self.model_code) | 0.000453 |
def thousandg_link(variant_obj, build=None):
"""Compose link to 1000G page for detailed information."""
dbsnp_id = variant_obj.get('dbsnp_id')
build = build or 37
if not dbsnp_id:
return None
if build == 37:
url_template = ("http://grch37.ensembl.org/Homo_sapiens/Variation/Explore"
"?v={};vdb=variation")
else:
url_template = ("http://www.ensembl.org/Homo_sapiens/Variation/Explore"
"?v={};vdb=variation")
return url_template.format(dbsnp_id) | 0.003663 |
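For illustration only, this is the kind of URL the helper yields for a GRCh37 variant; the dbSNP id here is a placeholder, not taken from real data:

variant_obj = {'dbsnp_id': 'rs12345'}  # hypothetical variant document

url_template = ("http://grch37.ensembl.org/Homo_sapiens/Variation/Explore"
                "?v={};vdb=variation")
print(url_template.format(variant_obj['dbsnp_id']))
# http://grch37.ensembl.org/Homo_sapiens/Variation/Explore?v=rs12345;vdb=variation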
def _populate_spelling_error(word,
suggestions,
contents,
line_offset,
column_offset,
message_start):
"""Create a LinterFailure for word.
This function takes suggestions from :suggestions: and uses it to
populate the message and candidate replacement. The replacement will
be a line in :contents:, as determined by :line_offset: and
:column_offset:.
"""
error_line = contents[line_offset]
if len(suggestions):
char_word_offset = (column_offset + len(word))
replacement = (error_line[:column_offset] +
suggestions[0] +
error_line[char_word_offset:])
else:
replacement = None
if len(suggestions):
suggestions_text = (""", perhaps you meant """ +
" ".join(suggestions))
else:
suggestions_text = ""
format_desc = message_start + suggestions_text
return LinterFailure(format_desc,
line_offset + 1,
replacement) | 0.000855 |
def read(calc_id, username=None):
"""
:param calc_id: a calculation ID
:param username: if given, restrict the search to the user's calculations
:returns: the associated DataStore instance
"""
if isinstance(calc_id, str) or calc_id < 0 and not username:
# get the last calculation in the datastore of the current user
return datastore.read(calc_id)
job = logs.dbcmd('get_job', calc_id, username)
if job:
return datastore.read(job.ds_calc_dir + '.hdf5')
else:
# calc_id can be present in the datastore and not in the database:
# this happens if the calculation was run with `oq run`
return datastore.read(calc_id) | 0.001437 |
def get_all_upper(self):
"""Return all parent GO IDs through both 'is_a' and all relationships."""
all_upper = set()
for upper in self.get_goterms_upper():
all_upper.add(upper.item_id)
all_upper |= upper.get_all_upper()
return all_upper | 0.010274 |
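The method is a plain transitive closure over parent links; a toy, self-contained version over stand-in nodes (not the real goatools GOTerm class) makes the recursion explicit:

class Node:
    # Stand-in for a GO term: an id plus direct parents.
    def __init__(self, item_id, parents=()):
        self.item_id = item_id
        self.parents = list(parents)

    def get_all_upper(self):
        all_upper = set()
        for upper in self.parents:
            all_upper.add(upper.item_id)
            all_upper |= upper.get_all_upper()   # recurse through ancestors
        return all_upper

root = Node('GO:0008150')
mid = Node('GO:0009987', parents=[root])
leaf = Node('GO:0007049', parents=[mid])
print(leaf.get_all_upper())   # {'GO:0009987', 'GO:0008150'}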
def __query(domain, limit=100):
"""Using the shell script to query pdns.cert.at is a hack, but python raises an error every time using subprocess
functions to call whois. So this hack is avoiding calling whois directly. Ugly, but works.
:param domain: The domain pdns is queried with.
:type domain: str
:param limit: Maximum number of results
:type limit: int
:returns: str -- Console output from whois call.
:rtype: str
"""
s = check_output(['{}'.format(os.path.join(os.path.dirname(__file__), 'whois.sh')), '--limit {} {}'.format(limit, domain)], universal_newlines=True)
return s | 0.00638 |
def SHLD(cpu, dest, src, count):
"""
    Double precision shift left.
Shifts the first operand (destination operand) to the left the number of bits specified by the third operand
(count operand). The second operand (source operand) provides bits to shift in from the right (starting with
the least significant bit of the destination operand).
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
:param count: count operand
"""
OperandSize = dest.size
tempCount = Operators.ZEXTEND(count.read(), OperandSize) & (OperandSize - 1)
arg0 = dest.read()
arg1 = src.read()
MASK = ((1 << OperandSize) - 1)
t0 = (arg0 << tempCount)
t1 = arg1 >> (OperandSize - tempCount)
res = Operators.ITEBV(OperandSize, tempCount == 0, arg0, t0 | t1)
res = res & MASK
dest.write(res)
if isinstance(tempCount, int) and tempCount == 0:
pass
else:
SIGN_MASK = 1 << (OperandSize - 1)
lastbit = 0 != ((arg0 << (tempCount - 1)) & SIGN_MASK)
cpu._set_shiftd_flags(OperandSize, arg0, res, lastbit, tempCount) | 0.004045 |
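Stripped of the symbolic Operators wrappers and flag updates, the core arithmetic is an ordinary double-precision shift left; a pure-Python walk-through on concrete 16-bit values (chosen arbitrarily for the example):

OperandSize = 16
MASK = (1 << OperandSize) - 1

dest, src, count = 0x1234, 0xABCD, 4
tempCount = count & (OperandSize - 1)

t0 = dest << tempCount                  # destination shifted left: 0x12340
t1 = src >> (OperandSize - tempCount)   # high bits of source shifted in: 0x000A
res = (t0 | t1) & MASK

print(hex(res))   # 0x234a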
def getuserid(username, copyright_str):
"""Get the ID of the user with `username` from write-math.com. If he
doesn't exist by now, create it. Add `copyright_str` as a description.
Parameters
----------
username : string
Name of a user.
copyright_str : string
Description text of a user in Markdown format.
Returns
-------
int :
ID on write-math.com of the user.
"""
global username2id
if username not in username2id:
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("INSERT IGNORE INTO `wm_users` ("
"`display_name` , "
"`password` ,"
"`account_type` ,"
"`confirmation_code` ,"
"`status` ,"
"`description`"
") "
"VALUES ("
"%s, '', 'Regular User', '', 'activated', %s"
");")
cursor.execute(sql, (username, copyright_str))
connection.commit()
# Get the id
try:
sql = ("SELECT `id` FROM `wm_users` "
"WHERE `display_name` = %s LIMIT 1")
cursor.execute(sql, username)
uid = cursor.fetchone()['id']
except Exception as inst:
logging.debug("username not found: %s", username)
print(inst)
# logging.info("%s: %s", username, uid)
username2id[username] = uid
return username2id[username] | 0.000539 |
def get_wordlist(language, word_source):
""" Takes in a language and a word source and returns a matching wordlist,
if it exists.
Valid languages: ['english']
Valid word sources: ['bip39', 'wiktionary', 'google']
"""
try:
wordlist_string = eval(language + '_words_' + word_source)
except NameError:
raise Exception("No wordlist could be found for the word source and language provided.")
wordlist = wordlist_string.split(',')
return wordlist | 0.00396 |
def setup(app):
"""Initialize Sphinx extension."""
app.setup_extension('nbsphinx')
app.add_source_suffix('.nblink', 'linked_jupyter_notebook')
app.add_source_parser(LinkedNotebookParser)
app.add_config_value('nbsphinx_link_target_root', None, rebuild='env')
return {'version': __version__, 'parallel_read_safe': True} | 0.002924 |
def get_imported_resource(self, context):
"""Get the imported resource
Returns `None` if module was not found.
"""
if self.level == 0:
return context.project.find_module(
self.module_name, folder=context.folder)
else:
return context.project.find_relative_module(
self.module_name, context.folder, self.level) | 0.004938 |
def get_archive_link(self, archive_format, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/:archive_format/:ref <http://developer.github.com/v3/repos/contents>`_
:param archive_format: string
:param ref: string
:rtype: string
"""
assert isinstance(archive_format, (str, unicode)), archive_format
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url = self.url + "/" + archive_format
if ref is not github.GithubObject.NotSet:
url += "/" + ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
url
)
return headers["location"] | 0.005495 |
def apply_substitutions(monomial, monomial_substitutions, pure=False):
"""Helper function to remove monomials from the basis."""
if is_number_type(monomial):
return monomial
original_monomial = monomial
changed = True
if not pure:
substitutions = monomial_substitutions
else:
substitutions = {}
for lhs, rhs in monomial_substitutions.items():
irrelevant = False
for atom in lhs.atoms():
if atom.is_Number:
continue
if not monomial.has(atom):
irrelevant = True
break
if not irrelevant:
substitutions[lhs] = rhs
while changed:
for lhs, rhs in substitutions.items():
monomial = fast_substitute(monomial, lhs, rhs)
if original_monomial == monomial:
changed = False
original_monomial = monomial
return monomial | 0.001041 |
def handle_error(errcode):
"""Error handler function. Translates an error code into an exception."""
if type(errcode) is c_int:
errcode = errcode.value
if errcode == 0:
pass # no error
elif errcode == -1:
raise TimeoutError("the operation failed due to a timeout.")
elif errcode == -2:
raise LostError("the stream has been lost.")
elif errcode == -3:
raise InvalidArgumentError("an argument was incorrectly specified.")
elif errcode == -4:
raise InternalError("an internal error has occurred.")
elif errcode < 0:
raise RuntimeError("an unknown error has occurred.") | 0.003053 |
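The same dispatch can be written table-driven; a sketch with local placeholder exception classes (the real ones live elsewhere in the library) keeps the code-to-exception mapping in one place:

class TimeoutError_(Exception): pass     # placeholders for the library's own exceptions
class LostError(Exception): pass
class InvalidArgumentError(Exception): pass
class InternalError(Exception): pass

_ERRORS = {
    -1: (TimeoutError_, "the operation failed due to a timeout."),
    -2: (LostError, "the stream has been lost."),
    -3: (InvalidArgumentError, "an argument was incorrectly specified."),
    -4: (InternalError, "an internal error has occurred."),
}

def handle_error(errcode):
    if errcode == 0:
        return   # no error
    if errcode < 0:
        exc, msg = _ERRORS.get(errcode, (RuntimeError, "an unknown error has occurred."))
        raise exc(msg)

handle_error(0)      # silently passes
# handle_error(-2)   # would raise LostError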
def column_of_time(path, start, end=-1):
"""This function extracts the column of times from a ProCoDA data file.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:param start: Index of first row of data to extract from the data file
:type start: int
:param end: Index of last row of data to extract from the data. Defaults to last row
:type end: int
:return: Experimental times starting at 0 day with units of days.
:rtype: numpy.array
:Examples:
.. code-block:: python
time = column_of_time("Reactor_data.txt", 0)
"""
df = pd.read_csv(path, delimiter='\t')
start_time = pd.to_numeric(df.iloc[start, 0])*u.day
day_times = pd.to_numeric(df.iloc[start:end, 0])
time_data = np.subtract((np.array(day_times)*u.day), start_time)
return time_data | 0.003264 |
def on_connect(client):
"""
Sample on_connect function.
Handles new connections.
"""
print "++ Opened connection to %s" % client.addrport()
broadcast('%s joins the conversation.\n' % client.addrport() )
CLIENT_LIST.append(client)
client.send("Welcome to the Chat Server, %s.\n" % client.addrport() ) | 0.009063 |
def build_clustbits(data, ipyclient, force):
"""
Reconstitutes clusters from .utemp and htemp files and writes them
to chunked files for aligning in muscle.
"""
## If you run this step then we clear all tmp .fa and .indel.h5 files
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
os.mkdir(data.tmpdir)
## parallel client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " building clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
uhandle = os.path.join(data.dirs.across, data.name+".utemp")
usort = os.path.join(data.dirs.across, data.name+".utemp.sort")
async1 = ""
## skip usorting if not force and already exists
if not os.path.exists(usort) or force:
## send sort job to engines. Sorted seeds allows us to work through
## the utemp file one locus at a time instead of reading all into mem.
LOGGER.info("building reads file -- loading utemp file into mem")
async1 = lbview.apply(sort_seeds, *(uhandle, usort))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
if async1.ready():
break
else:
time.sleep(0.1)
## send count seeds job to engines.
async2 = lbview.apply(count_seeds, usort)
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 1, printstr.format(elapsed), spacer=data._spacer)
if async2.ready():
break
else:
time.sleep(0.1)
## wait for both to finish while printing progress timer
nseeds = async2.result()
## send the clust bit building job to work and track progress
async3 = lbview.apply(sub_build_clustbits, *(data, usort, nseeds))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 2, printstr.format(elapsed), spacer=data._spacer)
if async3.ready():
break
else:
time.sleep(0.1)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 3, printstr.format(elapsed), spacer=data._spacer)
print("")
## check for errors
for job in [async1, async2, async3]:
try:
if not job.successful():
raise IPyradWarningExit(job.result())
except AttributeError:
## If we skip usorting then async1 == "" so the call to
## successful() raises, but we can ignore it.
pass | 0.004396 |
def _receive_data(self):
"""Gets data from queue"""
result = self.queue.get(block=True)
if hasattr(self.queue, 'task_done'):
self.queue.task_done()
return result | 0.009756 |
def ServicesGet (self, sensor_id):
"""
Retrieve services connected to a sensor in CommonSense.
If ServicesGet is successful, the result can be obtained by a call to getResponse() and should be a json string.
@sensor_id (int) - Sensor id of sensor to retrieve services from.
@return (bool) - Boolean indicating whether ServicesGet was successful.
"""
if self.__SenseApiCall__('/sensors/{0}/services.json'.format(sensor_id), 'GET'):
return True
else:
self.__error__ = "api call unsuccessful"
return False | 0.012177 |
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
"""
index = dict['index']
seed = hb_decode(dict['seed'])
n = dict['n']
root = hb_decode(dict['root'])
hmac = hb_decode(dict['hmac'])
timestamp = dict['timestamp']
self = State(index, seed, n, root, hmac, timestamp)
return self | 0.004246 |
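For context, a self-contained sketch of the round trip this constructor belongs to; plain hex strings stand in for the snippet's hb_encode/hb_decode helpers, and the class is trimmed to the fields shown above:

import time

class State:
    def __init__(self, index, seed, n, root, hmac, timestamp):
        self.index, self.seed, self.n = index, seed, n
        self.root, self.hmac, self.timestamp = root, hmac, timestamp

    def todict(self):
        # hex() stands in for hb_encode
        return {'index': self.index, 'seed': self.seed.hex(), 'n': self.n,
                'root': self.root.hex(), 'hmac': self.hmac.hex(),
                'timestamp': self.timestamp}

    @staticmethod
    def fromdict(d):
        # bytes.fromhex stands in for hb_decode
        return State(d['index'], bytes.fromhex(d['seed']), d['n'],
                     bytes.fromhex(d['root']), bytes.fromhex(d['hmac']),
                     d['timestamp'])

s = State(0, b'\x01\x02', 4, b'\xaa', b'\xbb', int(time.time()))
restored = State.fromdict(s.todict())
print(restored.seed == s.seed)   # True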
def roll(*args, **kwargs):
"""
Calculates a given statistic across a rolling time period.
Parameters
----------
returns : pd.Series or np.ndarray
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns (optional): float / series
Benchmark return to compare returns against.
function:
the function to run for each rolling window.
window (keyword): int
the number of periods included in each calculation.
(other keywords): other keywords that are required to be passed to the
function in the 'function' argument may also be passed in.
Returns
-------
np.ndarray, pd.Series
depends on input type
ndarray(s) ==> ndarray
Series(s) ==> pd.Series
A Series or ndarray of the results of the stat across the rolling
window.
"""
func = kwargs.pop('function')
window = kwargs.pop('window')
if len(args) > 2:
raise ValueError("Cannot pass more than 2 return sets")
if len(args) == 2:
if not isinstance(args[0], type(args[1])):
raise ValueError("The two returns arguments are not the same.")
if isinstance(args[0], np.ndarray):
return _roll_ndarray(func, window, *args, **kwargs)
return _roll_pandas(func, window, *args, **kwargs) | 0.000719 |
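What the private _roll_ndarray/_roll_pandas helpers amount to is applying the statistic to each overlapping window; a minimal stand-in for the ndarray case (not the library's actual implementation):

import numpy as np

def roll_ndarray(returns, window, function):
    out = []
    for i in range(window, len(returns) + 1):
        out.append(function(returns[i - window:i]))   # one overlapping window at a time
    return np.asarray(out)

rets = np.array([0.01, -0.02, 0.015, 0.003, -0.007, 0.02])
print(roll_ndarray(rets, 3, np.mean))   # four 3-period rolling means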
def Stokes_number(V, Dp, D, rhop, mu):
r'''Calculates Stokes Number for a given characteristic velocity `V`,
particle diameter `Dp`, characteristic diameter `D`, particle density
`rhop`, and fluid viscosity `mu`.
.. math::
\text{Stk} = \frac{\rho_p V D_p^2}{18\mu_f D}
Parameters
----------
V : float
Characteristic velocity (often superficial), [m/s]
Dp : float
Particle diameter, [m]
D : float
Characteristic diameter (ex demister wire diameter or cyclone
diameter), [m]
rhop : float
Particle density, [kg/m^3]
mu : float
Fluid viscosity, [Pa*s]
Returns
-------
Stk : float
Stokes numer, [-]
Notes
-----
Used in droplet impaction or collection studies.
Examples
--------
>>> Stokes_number(V=0.9, Dp=1E-5, D=1E-3, rhop=1000, mu=1E-5)
0.5
References
----------
.. [1] Rhodes, Martin J. Introduction to Particle Technology. Wiley, 2013.
.. [2] Al-Dughaither, Abdullah S., Ahmed A. Ibrahim, and Waheed A.
Al-Masry. "Investigating Droplet Separation Efficiency in Wire-Mesh Mist
Eliminators in Bubble Column." Journal of Saudi Chemical Society 14, no.
4 (October 1, 2010): 331-39. https://doi.org/10.1016/j.jscs.2010.04.001.
'''
return rhop*V*(Dp*Dp)/(18.0*mu*D) | 0.004392 |
def _get_wmi_setting(wmi_class_name, setting, server):
'''
Get the value of the setting for the provided class.
'''
    ret = None
    with salt.utils.winapi.Com():
try:
connection = wmi.WMI(namespace=_WMI_NAMESPACE)
wmi_class = getattr(connection, wmi_class_name)
objs = wmi_class([setting], Name=server)[0]
ret = getattr(objs, setting)
except wmi.x_wmi as error:
_LOG.error('Encountered WMI error: %s', error.com_error)
except (AttributeError, IndexError) as error:
_LOG.error('Error getting %s: %s', wmi_class_name, error)
return ret | 0.001577 |
def gpio_properties(self):
"""Returns the properties of the user-controllable GPIOs.
Provided the device supports user-controllable GPIOs, they will be
returned by this method.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of ``JLinkGPIODescriptor`` instances totalling the number of
requested properties.
Raises:
JLinkException: on error.
"""
res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)
if res < 0:
raise errors.JLinkException(res)
num_props = res
buf = (structs.JLinkGPIODescriptor * num_props)()
res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)
if res < 0:
raise errors.JLinkException(res)
return list(buf) | 0.002418 |
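The buffer handling is the standard ctypes array-of-structs pattern; a standalone illustration with a toy descriptor (the real JLinkGPIODescriptor layout is not reproduced here):

import ctypes

class Descriptor(ctypes.Structure):        # toy stand-in, not the real descriptor
    _fields_ = [("acName", ctypes.c_char * 32),
                ("Flags", ctypes.c_uint32)]

num_props = 3
buf = (Descriptor * num_props)()           # contiguous array of num_props structs
ptr = ctypes.byref(buf)                    # what the C API call would receive
print(len(list(buf)), ctypes.sizeof(buf))  # 3, and 3 * sizeof(Descriptor) bytes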
def _fmt_files(filelist):
""" Produce a file listing.
"""
depth = max(i.path.count('/') for i in filelist)
pad = ['\uFFFE'] * depth
base_indent = ' ' * 38
indent = 0
result = []
prev_path = pad
sorted_files = sorted((i.path.split('/')[:-1]+pad, i.path.rsplit('/', 1)[-1], i) for i in filelist)
for path, name, fileinfo in sorted_files:
path = path[:depth]
if path != prev_path:
common = min([depth] + [idx
for idx, (dirname, prev_name) in enumerate(zip(path, prev_path))
if dirname != prev_name
])
#result.append("!!%r %r" % (indent, common))
#result.append("!!%r" % (prev_path,))
#result.append("!!%r" % (path,))
while indent > common:
indent -= 1
result.append("%s%s/" % (base_indent, ' ' * indent))
for dirname in path[common:]:
if dirname == '\uFFFE':
break
result.append("%s%s\\ %s" % (base_indent, ' ' * indent, dirname))
indent += 1
##result.append("!!%r %r" % (path, name))
result.append(" %s %s %s %s| %s" % (
{0: "off ", 1: " ", 2: "high"}.get(fileinfo.prio, "????"),
fmt.iso_datetime(fileinfo.mtime),
fmt.human_size(fileinfo.size),
' ' * indent, name,
))
prev_path = path
while indent > 0:
indent -= 1
result.append("%s%s/" % (base_indent, ' ' * indent))
result.append("%s= %d file(s)" % (base_indent, len(filelist)))
return '\n'.join(result) | 0.005471 |
def uniprot_map(from_scheme, to_scheme, list_of_from_ids, cache_dir = None, silent = True):
'''Maps from one ID scheme to another using the UniProt service.
    list_of_from_ids should be a list of strings.
This function was adapted from http://www.uniprot.org/faq/28#id_mapping_examples which also gives examples of
valid values for from_scheme and to_scheme.
Note that some conversions are not directly possible e.g. PDB_ID (PDB) to UPARC (UniParc). They need to go through
an intermediary format like ACC (UniProtKB AC) or ID (UniProtKB ID).
This function returns a dict mapping the IDs in from_scheme to a list of sorted IDs in to_scheme.
'''
try:
assert(hasattr(list_of_from_ids, '__iter__'))
except:
raise Exception('The list_of_from_ids argument should be an iterable type (e.g. list).')
full_mapping = {}
cached_mapping_file = None
if cache_dir:
cached_mapping_file = os.path.join(cache_dir, '%s.%s' % (from_scheme, to_scheme))
if os.path.exists(cached_mapping_file):
full_mapping = simplejson.loads(read_file(cached_mapping_file))
list_of_from_ids = set(list_of_from_ids)
requested_mapping = {}
remaining_ids = []
for id in list_of_from_ids:
if full_mapping.get(id):
requested_mapping[id] = full_mapping[id]
else:
remaining_ids.append(id)
assert(set(remaining_ids + requested_mapping.keys()) == set(list_of_from_ids))
if remaining_ids:
if not silent:
print("Getting %s->%s mapping" % (from_scheme, to_scheme))
url = 'http://www.uniprot.org/mapping/'
params = {
'from' : from_scheme,
'to' : to_scheme,
'format' : 'tab',
'query' : ' '.join(sorted(list(list_of_from_ids))),
}
data = urllib.urlencode(params)
request = urllib2.Request(url, data)
contact = "" # Please set your email address here to help us debug in case of problems.
request.add_header('User-Agent', 'Python %s' % contact)
response = urllib2.urlopen(request)
page = response.read(200000)
lines = page.split("\n")
assert(lines[-1] == '')
assert(lines[0].split("\t") == ['From', 'To'])
for line in lines[1:-1]:
tokens = line.split("\t")
assert(len(tokens) == 2)
assert(tokens[0] in list_of_from_ids)
full_mapping[tokens[0]] = full_mapping.get(tokens[0], [])
full_mapping[tokens[0]].append(tokens[1])
requested_mapping[tokens[0]] = requested_mapping.get(tokens[0], [])
requested_mapping[tokens[0]].append(tokens[1])
# Sort the IDs
for k, v in requested_mapping.iteritems():
#assert(len(v) == len(set(v)))
requested_mapping[k] = sorted(set(v))
for k, v in full_mapping.iteritems():
#assert(len(v) == len(set(v)))
full_mapping[k] = sorted(set(v))
if remaining_ids and cached_mapping_file:
write_file(cached_mapping_file, simplejson.dumps(full_mapping))
return requested_mapping | 0.006646 |
def _members(self):
"""
Return a dict of non-private members.
"""
return {
key: value
for key, value in self.__dict__.items()
# NB: ignore internal SQLAlchemy state and nested relationships
if not key.startswith("_") and not isinstance(value, Model)
} | 0.005882 |
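A toy illustration of the same filter, with a bare sentinel class standing in for the SQLAlchemy Model base:

class Model:            # stand-in for the ORM base class
    pass

class Row:
    def __init__(self):
        self.id = 7
        self.name = "widget"
        self._internal = object()   # leading underscore: excluded
        self.parent = Model()       # nested relationship: excluded

    def _members(self):
        return {k: v for k, v in self.__dict__.items()
                if not k.startswith("_") and not isinstance(v, Model)}

print(Row()._members())   # {'id': 7, 'name': 'widget'}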
def read_file(self, filename, destination=''):
"""reading data from device into local file"""
if not destination:
destination = filename
log.info('Transferring %s to %s', filename, destination)
data = self.download_file(filename)
# Just in case, the filename may contain folder, so create it if needed.
log.info(destination)
if not os.path.exists(os.path.dirname(destination)):
try:
os.makedirs(os.path.dirname(destination))
except OSError as e: # Guard against race condition
if e.errno != errno.EEXIST:
raise
with open(destination, 'w') as fil:
fil.write(data) | 0.004121 |
def add(self, classifier, threshold, begin=None, end=None):
"""Adds a new strong classifier with the given threshold to the cascade.
**Parameters:**
classifier : :py:class:`bob.learn.boosting.BoostedMachine`
A strong classifier to add
``threshold`` : float
The classification threshold for this cascade step
``begin``, ``end`` : int or ``None``
If specified, only the weak machines with the indices ``range(begin,end)`` will be added.
"""
boosted_machine = bob.learn.boosting.BoostedMachine()
if begin is None: begin = 0
if end is None: end = len(classifier.weak_machines)
for i in range(begin, end):
boosted_machine.add_weak_machine(classifier.weak_machines[i], classifier.weights[i])
self.cascade.append(boosted_machine)
self.thresholds.append(threshold)
self._indices() | 0.007042 |