text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---|
def setup_formats(self):
"""
Inspects its methods to see what it can convert from and to
"""
methods = self.get_methods()
for m in methods:
#Methods named "from_X" will be assumed to convert from format X to the common format
if m.startswith("from_"):
self.input_formats.append(re.sub("^from_", "", m))
#Methods named "to_X" will be assumed to convert from the common format to X
elif m.startswith("to_"):
self.output_formats.append(re.sub("^to_", "", m)) | 0.017513 |
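For illustration, here is a minimal, self-contained sketch of the same from_X/to_X naming convention (the CsvJsonConverter class and its methods are made up, not part of the original project):

import json
import re

class CsvJsonConverter(object):
    """Hypothetical converter whose method names follow the from_X/to_X convention."""
    def __init__(self):
        self.input_formats = []
        self.output_formats = []

    def get_methods(self):
        # Every callable attribute name; only from_*/to_* names matter below.
        return [name for name in dir(self) if callable(getattr(self, name))]

    def from_csv(self, text):
        return [row.split(',') for row in text.splitlines()]

    def to_json(self, rows):
        return json.dumps(rows)

    def setup_formats(self):
        for m in self.get_methods():
            if m.startswith("from_"):
                self.input_formats.append(re.sub("^from_", "", m))
            elif m.startswith("to_"):
                self.output_formats.append(re.sub("^to_", "", m))

converter = CsvJsonConverter()
converter.setup_formats()
print(converter.input_formats)   # ['csv']
print(converter.output_formats)  # ['json']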
def _hide_column(self, column):
'''Hides a column by prefixing the name with \'__\''''
column = _ensure_string_from_expression(column)
new_name = self._find_valid_name('__' + column)
self._rename(column, new_name) | 0.008163 |
def _virtual_hv(osdata):
'''
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
'''
grains = {}
# Bail early if we're not running on Xen
try:
if 'xen' not in osdata['virtual']:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ('major', 'minor', 'extra'):
with salt.utils.files.fopen('/sys/hypervisor/version/{}'.format(fn), 'r') as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains['virtual_hv_version'] = '{}.{}{}'.format(version['major'], version['minor'], version['extra'])
grains['virtual_hv_version_info'] = [version['major'], version['minor'], version['extra']]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {0: 'writable_page_tables',
1: 'writable_descriptor_tables',
2: 'auto_translated_physmap',
3: 'supervisor_mode_kernel',
4: 'pae_pgdir_above_4gb',
5: 'mmu_pt_update_preserve_ad',
7: 'gnttab_map_avail_bits',
8: 'hvm_callback_vector',
9: 'hvm_safe_pvclock',
10: 'hvm_pirqs',
11: 'dom0',
12: 'grant_map_identity',
13: 'memory_op_vnode_supported',
14: 'ARM_SMCCC_supported'}
try:
with salt.utils.files.fopen('/sys/hypervisor/properties/features', 'r') as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains['virtual_hv_features'] = features
grains['virtual_hv_features_list'] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains | 0.005065 |
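The feature decoding above is a plain bitmask test against the hex string read from sysfs; a standalone sketch with an assumed feature value (not read from a real system):

# Assumed value of /sys/hypervisor/properties/features, for illustration only.
features = '801'   # hex: bits 0 and 11 set

xen_feature_table = {0: 'writable_page_tables',
                     1: 'writable_descriptor_tables',
                     2: 'auto_translated_physmap',
                     11: 'dom0'}

enabled_features = [feat for bit, feat in xen_feature_table.items()
                    if int(features, 16) & (1 << bit)]
print(enabled_features)   # ['writable_page_tables', 'dom0']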
def set_printoptions(**kwargs):
"""Set printing options.
These options determine the way JPEG 2000 boxes are displayed.
Parameters
----------
short : bool, optional
When True, only the box ID, offset, and length are displayed. Useful
for displaying only the basic structure or skeleton of a JPEG 2000
file.
xml : bool, optional
When False, printing of the XML contents of any XML boxes or UUID XMP
boxes is suppressed.
codestream : bool, optional
When False, the codestream segments are not printed. Otherwise the
segments are printed depending on how set_parseoptions has been used.
See also
--------
get_printoptions
Examples
--------
To put back the default options, you can use:
>>> import glymur
>>> glymur.set_printoptions(short=False, xml=True, codestream=True)
"""
warnings.warn('Use set_option instead of set_printoptions.',
DeprecationWarning)
for key, value in kwargs.items():
if key not in ['short', 'xml', 'codestream']:
raise KeyError('"{0}" not a valid keyword parameter.'.format(key))
set_option('print.' + key, value) | 0.000823 |
def lstm(name, input, state_c, state_h, kernel_i, kernel_j, kernel_f, kernel_o, bias_i, bias_j, bias_f, bias_o, new_state_c, new_state_h):
''' Full:
- it = f(Xt*Wi + Ht_1*Ri + Pi . Ct_1 + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Pf . Ct_1 + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_1 + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Po . Ct + Wbo + Rbo)
- Ht = ot . h(Ct)
'''
''' No peephole:
- it = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_1 + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Wbo + Rbo)
- Ht = ot . h(Ct)
'''
nn = Build(name)
inputs = nn.concat(input, state_h)
i = nn.sigmoid(nn.mad(x=inputs, kernel=kernel_i, bias=bias_i))
j = nn.tanh(nn.mad(inputs, kernel_j, bias_j))
f = nn.sigmoid(nn.mad(inputs, kernel_f, bias_f))
o = nn.sigmoid(nn.mad(inputs, kernel_o, bias_o))
# new_c = state_c * f' + i' * j'
nn.add(
nn.mul(state_c, f), nn.mul(i, j),
out=new_state_c)
# new_h = o' * tanh(new_c)
nn.mul(o, nn.tanh(new_state_c),
out=new_state_h)
return nn.layers | 0.004248 |
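The docstring equations map directly onto array operations. Below is a NumPy sketch of the no-peephole step; the shapes, random weights, and helper names are assumptions for illustration, whereas the real function builds layers through the Build/nn backend:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, c_prev, h_prev, W, b):
    """One no-peephole LSTM step; each W[k] has shape (n_in + n_hid, n_hid)."""
    inputs = np.concatenate([x, h_prev], axis=-1)
    i = sigmoid(inputs @ W['i'] + b['i'])     # input gate
    f = sigmoid(inputs @ W['f'] + b['f'])     # forget gate
    j = np.tanh(inputs @ W['j'] + b['j'])     # candidate cell state (ct)
    o = sigmoid(inputs @ W['o'] + b['o'])     # output gate
    c = f * c_prev + i * j                    # Ct = ft . Ct_1 + it . ct
    h = o * np.tanh(c)                        # Ht = ot . h(Ct)
    return c, h

rng = np.random.default_rng(0)
n_in, n_hid = 4, 3
W = {k: rng.normal(size=(n_in + n_hid, n_hid)) for k in 'ifjo'}
b = {k: np.zeros(n_hid) for k in 'ifjo'}
c, h = lstm_step(rng.normal(size=n_in), np.zeros(n_hid), np.zeros(n_hid), W, b)
print(c.shape, h.shape)   # (3,) (3,)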
def post_collect(self, obj):
"""
Manage the side-effect of not collecting other items of the same type as the root model.
For example, if you run the collect on a specific user that is linked to a model "A", which is itself
linked (ForeignKey) to ANOTHER user, then the collect won't collect this other user, but the collected
model "A" will keep the ForeignKey value of a user we are not collecting.
For now, we set that ForeignKey to the root user, so that model "A" always stays linked to an existing
user (of course, the meaning changes, but only when this field is not unique).
Before:
user1 -> modelA -> user2843
After collection:
user1 -> modelA -> user1
"""
if not self.ALLOWS_SAME_TYPE_AS_ROOT_COLLECT:
for field in self.get_local_fields(obj):
if isinstance(field, ForeignKey) and not field.unique:
# Relative field's API has been changed Django 2.0
# See https://docs.djangoproject.com/en/2.0/releases/1.9/#field-rel-changes for details
if django.VERSION[0] == 2:
remote_model = field.remote_field.model
else:
remote_model = field.rel.to
if isinstance(self.root_obj, remote_model):
setattr(obj, field.name, self.root_obj) | 0.005479 |
def p_function_body(p):
""" function_body : program_co END FUNCTION
| program_co END SUB
| statements_co END FUNCTION
| statements_co END SUB
| co_statements_co END FUNCTION
| co_statements_co END SUB
| END FUNCTION
| END SUB
"""
if not FUNCTION_LEVEL:
syntax_error(p.lineno(3), "Unexpected token 'END %s'. No Function or Sub has been defined." % p[2])
p[0] = None
return
a = FUNCTION_LEVEL[-1].kind
if a not in (KIND.sub, KIND.function): # This function/sub was not correctly declared, so exit now
p[0] = None
return
i = 2 if p[1] == 'END' else 3
b = p[i].lower()
if a != b:
syntax_error(p.lineno(i), "Unexpected token 'END %s'. Should be 'END %s'" % (b.upper(), a.upper()))
p[0] = None
else:
p[0] = None if p[1] == 'END' else p[1] | 0.00404 |
def render_error_page(code, exc, mimetype='text/html', traceback=''):
"""
Render the error page
"""
from giotto.views import get_jinja_template
if 'json' in mimetype:
return json.dumps({
'code': code,
'exception': exc.__class__.__name__,
'message': str(exc),
})
et = get_config('error_template')
if not et:
return "%s %s\n%s" % (code, str(exc), traceback)
template = get_jinja_template(et)
return template.render(
code=code,
exception=exc.__class__.__name__,
message=str(exc),
traceback=traceback
) | 0.001582 |
def main():
mesh = parse_gmsh('../commands.msh', '../boundary_lines.dat')
# now create the CRTomo grid
"""
1. Header
2. Nodes
3. Elements: Triangles, Boundary elements
4. Element ids for adjoining boundary elements
"""
str_header = get_header(mesh)
str_nodes = get_nodes(mesh)
str_elements = get_elements(mesh)
str_adj_boundaries, boundary_elements = get_ajd_bound(mesh)
crt_mesh = str_header + str_nodes + str_elements + str_adj_boundaries
fid = open('../elem.dat', 'w')
fid.write(crt_mesh)
fid.close()
write_elec_file('../electrode_positions.dat', mesh)
debug_plot_mesh(mesh, boundary_elements) | 0.001486 |
def get_context_data(self, **kwargs):
"""
Add filter form to the context.
TODO: Currently we construct the filter form object twice - in
get_queryset and here, in get_context_data. Will need to figure out a
good way to eliminate extra initialization.
"""
context = super(FilterFormMixin, self).get_context_data(**kwargs)
context[self.context_filterform_name] = self.get_filter()
return context | 0.004292 |
def decode_pdf_date(s: str) -> datetime:
"""Decode a pdfmark date to a Python datetime object
A pdfmark date is a string in a particular format. See the pdfmark
Reference for the specification.
"""
if isinstance(s, String):
s = str(s)
if s.startswith('D:'):
s = s[2:]
# A literal Z00'00' suffix is incorrect but found in the wild,
# probably produced by OS X Quartz -- standardize it
if s.endswith("Z00'00'"):
s = s.replace("Z00'00'", '+0000')
elif s.endswith('Z'):
s = s.replace('Z', '+0000')
s = s.replace("'", "") # Remove apos from PDF time strings
try:
return datetime.strptime(s, r'%Y%m%d%H%M%S%z')
except ValueError:
return datetime.strptime(s, r'%Y%m%d%H%M%S') | 0.001323 |
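For reference, this is the normalization a typical pdfmark date goes through before strptime (standalone sketch; the example value is made up):

from datetime import datetime

s = "D:20240131094500+05'30'"
s = s[2:]                   # drop the leading "D:"
s = s.replace("'", "")      # "+05'30'" -> "+0530", which %z understands
print(datetime.strptime(s, r'%Y%m%d%H%M%S%z').isoformat())
# 2024-01-31T09:45:00+05:30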
def dataframe( self, only_successful = True ):
"""Return the results as a pandas DataFrame. Note that there is a danger
of duplicate labels here, for example if the results contain a value
with the same name as one of the parameters. To resolve this, parameter names
take precedence over metadata values, and result names take precedence over
parameter names.
If the only_successful flag is set (the default), then the DataFrame will
only include results that completed without an exception; if it is set to
False, the DataFrame will include all results and also the exception details.
:param only_successful: include only successful experiments (defaults to True)
:returns: the parameters, results, and metadata in a DataFrame"""
def extract( r ):
if r[Experiment.METADATA][Experiment.STATUS]:
# experiment was a success, include it
rd = r[Experiment.METADATA].copy()
rd.update(r[Experiment.PARAMETERS])
rd.update(r[Experiment.RESULTS])
else:
# experiment returned an exception
if not only_successful:
# ...but we want it anyway
rd = r[Experiment.METADATA].copy()
rd.update(r[Experiment.PARAMETERS])
# ...and there are no results to add
else:
rd = None
return rd
records = [ r for r in map(extract, self.results()) if r is not None ]
return DataFrame.from_records(records) | 0.010962 |
def convert_pre(self, markup):
""" Substitutes <pre> to Wikipedia markup by adding a space at the start of a line.
"""
for m in re.findall(self.re["preformatted"], markup):
markup = markup.replace(m, m.replace("\n", "\n "))
markup = re.sub("<pre.*?>\n{0,}", "", markup)
markup = re.sub("\W{0,}</pre>", "", markup)
return markup | 0.016588 |
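A standalone sketch of the same transformation, with an explicit pattern standing in for self.re["preformatted"] and a made-up snippet of markup:

import re

markup = "before\n<pre>\nline one\nline two\n</pre>\nafter"
for m in re.findall(r"<pre.*?>.*?</pre>", markup, re.S):
    markup = markup.replace(m, m.replace("\n", "\n "))
markup = re.sub(r"<pre.*?>\n*", "", markup)
markup = re.sub(r"\W*</pre>", "", markup)
print(markup)
# before
#  line one
#  line two
# after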
def add_filter(self, key, filter_value):
"""
add and validate a filter with value
returns True on success, otherwise raises an exception
"""
seek = u"filter[%s]" % key
if self.validate_filter(key, filter_value):
self.filters[key] = filter_value
return True
else:
msg = u'Invalid filter value: filter:%s value:%s' % (key, filter_value)
print msg
raise SalesKingException("FILTER_INVALID", msg ) | 0.008032 |
def _export_work_errors(self, work, output_file):
"""Saves errors for given work pieces into file.
Args:
work: instance of either AttackWorkPieces or DefenseWorkPieces
output_file: name of the output file
"""
errors = set()
for v in itervalues(work.work):
if v['is_completed'] and v['error'] is not None:
errors.add(v['error'])
with open(output_file, 'w') as f:
for e in sorted(errors):
f.write(e)
f.write('\n') | 0.006198 |
def distance(latitude_1, longitude_1, elevation_1, latitude_2, longitude_2, elevation_2,
haversine=None):
""" Distance between two points """
# If points too distant -- compute haversine distance:
if haversine or (abs(latitude_1 - latitude_2) > .2 or abs(longitude_1 - longitude_2) > .2):
return haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2)
coef = math.cos(latitude_1 / 180. * math.pi)
#pylint: disable=invalid-name
x = latitude_1 - latitude_2
y = (longitude_1 - longitude_2) * coef
distance_2d = math.sqrt(x * x + y * y) * ONE_DEGREE
if elevation_1 is None or elevation_2 is None or elevation_1 == elevation_2:
return distance_2d
return math.sqrt(distance_2d ** 2 + (elevation_1 - elevation_2) ** 2) | 0.007519 |
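A quick standalone check of the flat-approximation branch; the value of ONE_DEGREE (roughly the length of one degree of latitude in metres) is an assumption here:

import math

ONE_DEGREE = 1000. * 10000.8 / 90.   # ~111120 m per degree (assumed constant)

lat1, lon1 = 46.0, 7.0
lat2, lon2 = 46.1, 7.1
coef = math.cos(lat1 / 180. * math.pi)
x = lat1 - lat2
y = (lon1 - lon2) * coef
print(math.sqrt(x * x + y * y) * ONE_DEGREE)   # ~13530 m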
def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return DenseVector(value)
raise TypeError("Could not convert %s to vector" % value) | 0.004444 |
def echo_via_pager(*args, **kwargs):
"""Display pager only if it does not fit in one terminal screen.
NOTE: The feature is available only on ``less``-based pager.
"""
try:
restore = 'LESS' not in os.environ
os.environ.setdefault('LESS', '-iXFR')
click.echo_via_pager(*args, **kwargs)
finally:
if restore:
os.environ.pop('LESS', None) | 0.002513 |
def get_assessment_data(self, queryset, total_count, user_id):
"""
Calculates and returns the following data for the supplied queryset:
data = {
'count': <the number of items in the queryset>
'percentage': <percentage of total_count queryset represents>
'is_user_call': <true if user made this call, false otherwise>
'users': <set of all users who made this call>
}
"""
# We need to convert the usernames to strings here because the JSON
# encoder will choke when serializing this data if the usernames are
# unicode as they are when we get them back from the distinct call.
users = [{'username': str(username), 'email': email}
for username, email
in queryset.values_list('user__username', 'user__email')
.distinct()]
count = queryset.count()
is_user_call = queryset.filter(user=user_id).exists()
return {
'count': count,
'percentage': count / float(total_count) * 100.0,
'is_user_call': is_user_call,
'users': users,
} | 0.00166 |
def update_context(app, pagename, templatename, context, doctree): # pylint: disable=unused-argument
"""
Update the page rendering context to include ``feedback_form_url``.
"""
context['feedback_form_url'] = feedback_form_url(app.config.project, pagename) | 0.011029 |
def sre_to_string(sre_obj, paren=True):
"""sre_parse object to string
:param sre_obj: Output of sre_parse.parse()
:type sre_obj: list
:rtype: str
"""
ret = u''
for i in sre_obj:
if i[0] == sre_parse.IN:
prefix = ''
if len(i[1]) and i[1][0][0] == sre_parse.NEGATE:
prefix = '^'
ret += u'[{0}{1}]'.format(prefix, sre_to_string(i[1], paren=paren))
elif i[0] == sre_parse.LITERAL:
u = unichr(i[1])
ret += u if u not in sre_parse.SPECIAL_CHARS else '\\{0}'.format(u)
elif i[0] == sre_parse.CATEGORY:
ret += REVERSE_CATEGORIES[i[1]]
elif i[0] == sre_parse.ANY:
ret += '.'
elif i[0] == sre_parse.BRANCH:
# TODO simplifications here
parts = [sre_to_string(x, paren=paren) for x in i[1][1]]
if not any(parts):
continue
if i[1][0]:
if len(parts) == 1:
paren = False
prefix = ''
else:
prefix = '?:'
branch = '|'.join(parts)
if paren:
ret += '({0}{1})'.format(prefix, branch)
else:
ret += '{0}'.format(branch)
elif i[0] == sre_parse.SUBPATTERN:
subexpr = i[1][1]
if IS_PY36_OR_GREATER and i[0] == sre_parse.SUBPATTERN:
subexpr = i[1][3]
if i[1][0]:
ret += '({0})'.format(sre_to_string(subexpr, paren=False))
else:
ret += '{0}'.format(sre_to_string(subexpr, paren=paren))
elif i[0] == sre_parse.NOT_LITERAL:
ret += '[^{0}]'.format(unichr(i[1]))
elif i[0] == sre_parse.MAX_REPEAT:
if i[1][0] == i[1][1]:
range_str = '{{{0}}}'.format(i[1][0])
else:
if i[1][0] == 0 and i[1][1] - i[1][0] == sre_parse.MAXREPEAT:
range_str = '*'
elif i[1][0] == 1 and i[1][1] - i[1][0] == sre_parse.MAXREPEAT - 1:
range_str = '+'
else:
range_str = '{{{0},{1}}}'.format(i[1][0], i[1][1])
ret += sre_to_string(i[1][2], paren=paren) + range_str
elif i[0] == sre_parse.MIN_REPEAT:
if i[1][0] == 0 and i[1][1] == sre_parse.MAXREPEAT:
range_str = '*?'
elif i[1][0] == 1 and i[1][1] == sre_parse.MAXREPEAT:
range_str = '+?'
elif i[1][1] == sre_parse.MAXREPEAT:
range_str = '{{{0},}}?'.format(i[1][0])
else:
range_str = '{{{0},{1}}}?'.format(i[1][0], i[1][1])
ret += sre_to_string(i[1][2], paren=paren) + range_str
elif i[0] == sre_parse.GROUPREF:
ret += '\\{0}'.format(i[1])
elif i[0] == sre_parse.AT:
if i[1] == sre_parse.AT_BEGINNING:
ret += '^'
elif i[1] == sre_parse.AT_END:
ret += '$'
elif i[0] == sre_parse.NEGATE:
pass
elif i[0] == sre_parse.RANGE:
ret += '{0}-{1}'.format(unichr(i[1][0]), unichr(i[1][1]))
elif i[0] == sre_parse.ASSERT:
if i[1][0]:
ret += '(?={0})'.format(sre_to_string(i[1][1], paren=False))
else:
ret += '{0}'.format(sre_to_string(i[1][1], paren=paren))
elif i[0] == sre_parse.ASSERT_NOT:
pass
else:
print('[!] cannot handle expression "%s"' % str(i))
return ret | 0.000558 |
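For context, the parse tree this function walks comes from sre_parse.parse(); a quick look at its shape (the exact repr varies across Python versions, and the module is deprecated in recent ones):

import sre_parse

print(sre_parse.parse('a[0-9]+'))
# e.g. [(LITERAL, 97), (MAX_REPEAT, (1, MAXREPEAT, [(IN, [(RANGE, (48, 57))])]))]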
def closest_hendecasyllable_patterns(self, scansion: str) -> List[str]:
"""
Find the closest group of matching valid hendecasyllable patterns.
:return: list of the closest valid hendecasyllable patterns; only candidates with a matching
length/number of syllables are considered.
>>> print(MetricalValidator().closest_hendecasyllable_patterns('UU-UU-U-U-X'))
['-U-UU-U-U-X', 'U--UU-U-U-X']
"""
return self._closest_patterns(self.VALID_HENDECASYLLABLES, scansion) | 0.00759 |
def y_max(self):
"""Get max y value for the variable."""
end_y = max(y for y, *_ in self.iterator())
if self.combined:
end_y += self.group_offset
return end_y + 2 * self.group_offset | 0.008772 |
def _set_xfpe(self, v, load=False):
"""
Setter method for xfpe, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/xfpe (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_xfpe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_xfpe() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=xfpe.xfpe, is_container='container', presence=False, yang_name="xfpe", rest_name="xfpe", parent=self, choice=(u'interface-identifier', u'xfpe'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """xfpe must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=xfpe.xfpe, is_container='container', presence=False, yang_name="xfpe", rest_name="xfpe", parent=self, choice=(u'interface-identifier', u'xfpe'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""",
})
self.__xfpe = t
if hasattr(self, '_set'):
self._set() | 0.006317 |
def _set_dpod(self, v, load=False):
"""
Setter method for dpod, mapped from YANG variable /dpod (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_dpod is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dpod() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=dpod.dpod, is_container='container', presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Manage and display DPOD license assignments.\nUsage: dpod [slot/port] [reserve|release]', u'display-when': u'(/c:capabilities/c:license/c:dpod_display = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-license', defining_module='brocade-license', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dpod must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=dpod.dpod, is_container='container', presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Manage and display DPOD license assignments.\nUsage: dpod [slot/port] [reserve|release]', u'display-when': u'(/c:capabilities/c:license/c:dpod_display = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-license', defining_module='brocade-license', yang_type='container', is_config=True)""",
})
self.__dpod = t
if hasattr(self, '_set'):
self._set() | 0.005031 |
def autoComplete(self,
polygons=[],
polylines=[],
sr=None
):
"""
The autoComplete operation simplifies the process of
constructing new polygons that are adjacent to other polygons.
It constructs polygons that fill in the gaps between existing
polygons and a set of polylines.
Inputs:
polygons - array of Polygon objects.
polylines - list of Polyline objects.
sr - spatial reference of the input geometries WKID.
"""
url = self._url + "/autoComplete"
params = {"f":"json"}
if sr is not None:
params['sr'] = sr
params['polygons'] = self.__geomToStringArray(polygons)
params['polylines'] = self.__geomToStringArray(polylines)
return self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | 0.009099 |
def _coerce_dtype(self, other_dtype):
"""Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
"""
if self._dtype is None:
new_dtype = np.dtype(other_dtype)
else:
new_dtype = np.find_common_type([self._dtype, np.dtype(other_dtype)], [])
if new_dtype != self.dtype:
self.set_dtype(new_dtype) | 0.008421 |
def from_dict(cls, context_options_dict):
"""Return a context job from a dict output by Context.to_dict."""
import copy
context_options = copy.deepcopy(context_options_dict)
tasks_inserted = context_options.pop('_tasks_inserted', False)
insert_tasks = context_options.pop('insert_tasks', None)
if insert_tasks:
context_options['insert_tasks'] = path_to_reference(insert_tasks)
# The constructor expects a reference to the persistence engine.
persistence_engine = context_options.pop('persistence_engine', None)
if persistence_engine:
context_options['persistence_engine'] = path_to_reference(
persistence_engine)
# If there are callbacks, reconstitute them.
callbacks = context_options.pop('callbacks', None)
if callbacks:
context_options['callbacks'] = decode_callbacks(callbacks)
context = cls(**context_options)
context._tasks_inserted = tasks_inserted
return context | 0.001905 |
def launch_plugin(self):
'''
launch nagios_plugin command
'''
# nagios_plugins probes
for plugin in self.plugins:
# Construct the nagios_plugin command
command = ('%s%s' % (self.plugins[plugin]['path'], self.plugins[plugin]['command']))
try:
nagios_plugin = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
LOG.error("[nagios_plugins]: '%s' executable is missing",
command)
else:
output = nagios_plugin.communicate()[0].strip()
return_code = nagios_plugin.returncode
if return_code >= len(STATUSES):
LOG.error("[nagios_plugins]: '%s' executable has an issue, return code: %s",
command, return_code)
else:
LOG.log(STATUSES[return_code][1],
"[nagios_plugins][%s] (%s status): %s",
plugin,
STATUSES[return_code][0],
output)
yield {'return_code': int(return_code),
'output': str(output),
'time_stamp': int(time.time()),
'service_description': plugin,
'specific_servers': self.plugins[plugin]['servers']} | 0.003034 |
def ReadingBloomFilter(filename, want_lock=False):
"""
Create a read-only bloom filter with an upperbound of
(num_elements, max_fp_prob) as a specification and using filename
as the backing datastore.
"""
with open('{}.desc'.format(filename), 'r') as descriptor:
num_elements = int(descriptor.readline())
max_fp_prob = float(descriptor.readline())
ignore_case = int(descriptor.readline())
return _hydra.BloomFilter.getFilter(
num_elements, max_fp_prob,
filename=filename, ignore_case=ignore_case,
read_only=True, want_lock=want_lock) | 0.001639 |
def disable(self):
"""
Disable the Cloud.
:returns: A list of mist.clients' updated clouds.
"""
payload = {
"new_state": "0"
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+'/clouds/'+self.id, data=data)
req.post()
self.enabled = False
self.mist_client.update_clouds() | 0.005115 |
def chdir(directory):
"""Change the current working directory to a different directory for a code
block and return the previous directory after the block exits. Useful to
run commands from a specificed directory.
:param str directory: The directory path to change to for this context.
"""
cur = os.getcwd()
try:
yield os.chdir(directory)
finally:
os.chdir(cur) | 0.002445 |
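The generator only behaves as a context manager when wrapped with contextlib.contextmanager; the decorator is presumably applied in the original module but is not shown in the snippet. A self-contained equivalent:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def chdir(directory):
    cur = os.getcwd()
    try:
        yield os.chdir(directory)
    finally:
        os.chdir(cur)

with tempfile.TemporaryDirectory() as tmp:
    with chdir(tmp):
        print(os.getcwd())   # inside the temporary directory
print(os.getcwd())           # previous working directory restored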
def run(self):
"""Run analysis.
The basic idea is to recursively find all script files in a specific
programming language, analyze each file, and sum up the results.
"""
n_target_file, n_other_file = 0, 0
code, comment, docstr, purecode = 0, 0, 0, 0
fc = FileCollection.from_path_except(self.workspace, self.ignore)
fc_yes, fc_no = fc.select(self.filter, keepboth=True)
n_other_file += len(fc_no)
for abspath in fc_yes:
try:
with open(abspath, "rb") as f:
code_text = f.read().decode("utf-8")
code_, comment_, docstr_, purecode_ = self.analyzer(
code_text)
code += code_
comment += comment_
docstr += docstr_
purecode += purecode_
n_target_file += 1
except Exception as e:
n_other_file += 1
lines = list()
lines.append("Code statistic result for '%s'" % self.workspace)
lines.append(" %r %r files, %r other files." %
(n_target_file, self.language, n_other_file))
lines.append(" code line: %s" % code)
lines.append(" comment line: %s" % comment)
lines.append(" docstr line: %s" % docstr)
lines.append(" purecode line: %s" % purecode)
message = "\n".join(lines)
print(message)
return message | 0.001995 |
def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject,
parsing_plan_for_children: Dict[str, AnyParser._RecursiveParsingPlan],
logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
"""
Implementation of AnyParser API
"""
raise Exception('This should never happen, since this parser relies on underlying parsers') | 0.016908 |
def from_notebook_node(self, nb, resources=None, **kw):
"""Takes output of OneCodexHTMLExporter and runs Weasyprint to get a PDF."""
from weasyprint import HTML, CSS
nb = copy.deepcopy(nb)
output, resources = super(OneCodexPDFExporter, self).from_notebook_node(
nb, resources=resources, **kw
)
buf = BytesIO()
HTML(string=output).write_pdf(
buf, stylesheets=[CSS(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE))]
)
buf.seek(0)
return buf.read(), resources | 0.008961 |
def _wrap_command(cmds, cls, strict=True):
"""Wrap a setup command
Parameters
----------
cmds: list(str)
The names of the other commands to run prior to the command.
strict: boolean, optional
Whether to raise errors when a pre-command fails.
"""
class WrappedCommand(cls):
def run(self):
if not getattr(self, 'uninstall', None):
try:
[self.run_command(cmd) for cmd in cmds]
except Exception:
if strict:
raise
else:
pass
# update package data
update_package_data(self.distribution)
result = cls.run(self)
return result
return WrappedCommand | 0.001253 |
def _read_ready(self):
"""Called by the event loop whenever the fd is ready for reading."""
try:
data = os.read(self._fileno, self.max_size)
except InterruptedError:
# No worries ;)
pass
except OSError as exc:
# Some OS-level problem, crash.
self._fatal_error(exc, "Fatal read error on file descriptor read")
else:
if data:
self._protocol.data_received(data)
else:
# We reached end-of-file.
if self._loop.get_debug():
logger.info("%r was closed by the kernel", self)
self._closing = False
self.pause_reading()
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None) | 0.002296 |
def setzscale(self, z1="auto", z2="auto", nsig=3, samplesizelimit = 10000, border=300):
"""
We set z1 and z2, according to different algorithms or arguments.
For both z1 and z2, give either :
- "auto" (default automatic, different between z1 and z2)
- "ex" (extrema)
- "flat" ("sigma-cuts" around median value, well-suited for flatfields)
- numeric value like 1230.34
nsig is the number of sigmas to be rejected (used by auto z1 + both flats)
samplesizelimit is the maximum number of pixels to compute statistics on.
If your image is larger than samplesizelimit, I will use only samplesizelimit pixels of it.
If your image is more than 3 times border in width and height, I will skip border pixels around the image before
doing calculations. This is made to get rid of the overscan and prescan etc.
So you can basically leave this at 300; it will only affect images wider than 900 pixels.
(300 happens to be a safe value for many telescopes.)
You can put border = 0 to deactivate this feature.
If you give nothing, the cutoff will not be changed.
You should set the z scale directly after cropping the image.
"""
if self.pilimage != None:
raise RuntimeError, "Cannot set z scale anymore, PIL image already exists !"
if self.numpyarray.shape[0] > 3 * border and self.numpyarray.shape[1] > 3 * border:
if border > 0:
if self.verbose :
print "For the stats I will leave a border of %i pixels" % border
calcarray = self.numpyarray[border:-border, border:-border].copy()
else:
calcarray = self.numpyarray.copy()
else:
calcarray = self.numpyarray.copy()
if self.verbose:
print "Image is too small for a border of %i" % (border)
# Starting with the simple possibilities :
if z1 == "ex" :
self.z1 = np.min(calcarray)
if self.verbose:
print "Setting ex z1 to %f" % self.z1
if z2 == "ex":
self.z2 = np.max(calcarray)
if self.verbose:
print "Setting ex z2 to %f" % self.z2
if type(z1) == type(0) or type(z1) == type(0.0):
self.z1 = z1
if self.verbose:
print "Setting z1 to %f" % self.z1
if type(z2) == type(0) or type(z2) == type(0.0):
self.z2 = z2
if self.verbose:
print "Setting z2 to %f" % self.z2
# Now it gets a little more sophisticated.
if z1 == "auto" or z2 == "auto" or z1 == "flat" or z2 == "flat":
# To speed up, we do not want to do statistics on the full image if it is large.
# So we prepare a small random sample of pixels.
calcarray.shape = calcarray.size # We flatten the 2D array
if calcarray.size > samplesizelimit :
#selectionindices = np.random.random_integers(low = 0, high = calcarray.size - 1, size=samplesizelimit)
selectionindices = np.linspace(0, calcarray.size-1, samplesizelimit).astype(np.int)
statsel = calcarray[selectionindices]
else :
statsel = calcarray
#nbrofbins = 10 + int(np.log10(calcarray.size)*10.0)
#print "Building histogram with %i bins" % nbrofbins
#nbrofbins = 100
#hist = np.histogram(statsel, bins=nbrofbins, range=(self.z1, self.z2), normed=False, weights=None, new=True)
medianlevel = np.median(statsel)
firststd = np.std(statsel)
if z1 == "auto" :
# 2 sigma clipping (quick and dirty star removal) :
nearskypixvals = statsel[np.logical_and(statsel > medianlevel - 2*firststd, statsel < medianlevel + 2*firststd)]
skylevel = np.median(nearskypixvals)
secondstd = np.std(nearskypixvals)
if self.verbose :
print "Sky level at %f +/- %f" % (skylevel, secondstd)
self.z1 = skylevel - nsig*secondstd
if self.verbose :
print "Setting auto z1 to %f, nsig = %i" % (self.z1, nsig)
if z2 == "auto" :
# Here we want to reject a percentage of high values...
sortedstatsel = np.sort(statsel)
n = round(0.9995 * statsel.size)
self.z2 = sortedstatsel[n]
if self.verbose :
print "Setting auto z2 to %f" % self.z2
if z1 == "flat" :
# 5 sigma clipping to get rid of cosmics :
nearflatpixvals = statsel[np.logical_and(statsel > medianlevel - 5*firststd, statsel < medianlevel + 5*firststd)]
flatlevel = np.median(nearflatpixvals)
flatstd = np.std(nearflatpixvals)
self.z1 = flatlevel - nsig*flatstd
if self.verbose :
print "Setting flat z1 : %f, nsig = %i" % (self.z1, nsig)
if z2 == "flat" : # symmetric to z1
# 5 sigma clipping to get rid of cosmics :
nearflatpixvals = statsel[np.logical_and(statsel > medianlevel - 5*firststd, statsel < medianlevel + 5*firststd)]
flatlevel = np.median(nearflatpixvals)
flatstd = np.std(nearflatpixvals)
self.z2 = flatlevel + nsig*flatstd
if self.verbose :
print "Setting flat z2 : %f, nsig = %i" % (self.z2, nsig) | 0.015176 |
def disconnect(self):
"""
Ends a client authentication session, performs a logout and a clean up.
"""
if self.r_session:
self.session_logout()
self.r_session = None
self.clear() | 0.008403 |
def parse(self, file):
'''
Method the programmer should call when ready to parse a file.
:param file: exact file path of the file to be processed
:return: PieceTree object representing the file in memory
'''
parser = make_parser()
self.clear()
class Extractor(xml.sax.ContentHandler):
def __init__(self, parent):
self.parent = parent
def startElement(self, name, attrs):
attribs = {}
for attrname in attrs.getNames():
attrvalue = attrs.get(attrname)
attribs[attrname] = attrvalue
self.parent.StartTag(name, attribs)
def characters(self, text):
self.parent.NewData(text)
def endElement(self, name):
self.parent.EndTag(name)
parser.setContentHandler(Extractor(self))
# OFFLINE MODE
parser.setFeature(handler.feature_external_ges, False)
fob = open(file, 'r')
parser.parse(fob)
return self.piece | 0.00183 |
def wget(ftp, f = False, exclude = False, name = False, md5 = False, tries = 10):
"""
download files with wget
"""
# file name
if f is False:
f = ftp.rsplit('/', 1)[-1]
# download the file if it does not already exist
# check md5s on server (optional)
t = 0
while md5check(f, ftp, md5, exclude) is not True:
t += 1
if name is not False:
print('# downloading:', name, f)
if exclude is False:
command = 'wget -q --random-wait %s' % (ftp)
else:
command = 'wget -q --random-wait -R %s %s' % (exclude, ftp)
p = Popen(command, shell = True)
p.communicate()
if t >= tries:
print('not downloaded:', name, f)
return [f, False]
return [f, True] | 0.017588 |
def handle_request(self, req, validate=True):
'''
handle a jsonrpc request
req - request as jsonrpc-dict
validate - validate the request? (default: True)
returns jsonrpc-dict with result or error
'''
#result that will be filled and returned
res = {'jsonrpc': '2.0', 'id': -1, 'result': None}
logging.debug('')
logging.debug('--------------------REQUEST' +
'--------------------\n' +
json.dumps(req,
sort_keys=True,
indent=4,
separators=(',', ': ')))
logging.debug('-----------------------------------------------')
notification = False
if self._sessionmaker:
session = self._sessionmaker()
try:
#validate request
if validate:
self._validate_format(req)
self._validate_params(req)
method = req['method']
#check if request is a notification
try:
getattr(self._methods[method]['fct'], '__SLOJSONRPCNotification__')
notification = True
except AttributeError:
notification = False
#call the python function
if 'params' in req:
fct = self._methods[method]['fct']
if isinstance(req['params'], dict):
req['params']['session'] = session
res['result'] = fct(**req['params'])
else:
res['result'] = fct(session, req['params'])
else:
res['result'] = self._methods[method]['fct'](session)
except SLOJSONRPCError as e:
res = e.to_json(req.get('id', None))
except:
logging.debug('Uncaught Exception:')
logging.debug('-------------------\n' + traceback.format_exc())
res = SLOJSONRPCError(-32603).to_json(req.get('id', None))
session.close()
logging.debug('--------------------RESULT' +
'--------------------\n' +
json.dumps(res,
sort_keys=True,
indent=4,
separators=(',', ': ')))
logging.debug('----------------------------------------------')
#return None if a notification
if notification:
return None
elif not 'error' in res:
res['id'] = req['id']
return res | 0.003812 |
def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response) | 0.005376 |
def with_histogram(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs):
"""
Time-measuring decorator: the time spent in the wrapped function is measured
and added to the named metric.
reservoir_args and reservoir_kwargs are passed to get_or_create_histogram()
"""
hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs)
def wrapper(f):
@functools.wraps(f)
def fun(*args, **kwargs):
t1 = time.time()
res = f(*args, **kwargs)
t2 = time.time()
hmetric.notify(t2-t1)
return res
return fun
return wrapper | 0.00607 |
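The decorator is the standard wrap-and-time pattern. A self-contained sketch with a stand-in histogram object (the real get_or_create_histogram registry is not used here):

import functools
import time

class FakeHistogram(object):
    """Stand-in for the real histogram metric: it just records notified values."""
    def __init__(self):
        self.values = []
    def notify(self, value):
        self.values.append(value)

def timed(hmetric):
    def wrapper(f):
        @functools.wraps(f)
        def fun(*args, **kwargs):
            t1 = time.time()
            res = f(*args, **kwargs)
            hmetric.notify(time.time() - t1)
            return res
        return fun
    return wrapper

h = FakeHistogram()

@timed(h)
def work():
    time.sleep(0.01)

work()
print(len(h.values))   # 1  (one timing recorded)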
def major(self):
""" Major inbox feed, contains major activities such as notes and images. """
url = self._subfeed("major")
if "major" in self.url or "minor" in self.url:
return self
if self._major is None:
self._major = self.__class__(url, pypump=self._pump)
return self._major | 0.008772 |
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
except KeyError:
return default
if type is not None:
try:
rv = type(rv)
except ValueError:
rv = default
return rv | 0.001642 |
def get_configparser(filename=''):
"""
Read main configuration file and all files from *conf.d* subdirectory
and return parsed configuration as a **configparser.RawConfigParser**
instance.
"""
filename = filename or os.environ.get('SHELTER_CONFIG_FILENAME', '')
if not filename:
raise ImproperlyConfiguredError(_(
"Configuration file is not defined. You must either "
"set 'SHELTER_CONFIG_FILENAME' environment variable or "
"'-f/--config-file' command line argument."
))
parser = six.moves.configparser.RawConfigParser()
for conf_file in get_conf_files(filename):
logger.info("Found config '%s'", conf_file)
if not parser.read(conf_file):
logger.warning("Error while parsing config '%s'", conf_file)
return parser | 0.001198 |
def pairwise_align_sequences_to_representative_parallelize(self, sc, gapopen=10, gapextend=0.5, outdir=None,
engine='needle', parse=True, force_rerun=False):
"""Pairwise all sequences in the sequences attribute to the representative sequence. Stores the alignments
in the ``sequence_alignments`` DictList attribute.
Args:
sc (SparkContext): Configured spark context for parallelization
gapopen (int): Only for ``engine='needle'`` - Gap open penalty is the score taken away when a gap is created
gapextend (float): Only for ``engine='needle'`` - Gap extension penalty is added to the standard gap penalty
for each base or residue in the gap
outdir (str): Only for ``engine='needle'`` - Path to output directory. Default is the protein sequence
directory.
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
parse (bool): Store locations of mutations, insertions, and deletions in the alignment object (as an
annotation)
force_rerun (bool): Only for ``engine='needle'`` - Default False, set to True if you want to rerun the
alignment if outfile exists.
"""
if not self.representative_sequence:
raise ValueError('{}: no representative sequence set'.format(self.id))
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
def pairwise_sc(self, seqprop):
aln_id = '{}_{}'.format(self.id, seqprop.id)
outfile = '{}.needle'.format(aln_id)
aln = ssbio.protein.sequence.utils.alignment.pairwise_sequence_alignment(a_seq=self.representative_sequence.seq_str,
a_seq_id=self.id,
b_seq=seqprop.seq_str,
b_seq_id=seqprop.id,
gapopen=gapopen,
gapextend=gapextend,
engine=engine,
outdir=outdir,
outfile=outfile,
force_rerun=force_rerun)
aln.id = aln_id
aln.annotations['a_seq'] = self.representative_sequence.id
aln.annotations['b_seq'] = seqprop.id
if parse:
aln_df = ssbio.protein.sequence.utils.alignment.get_alignment_df(a_aln_seq=str(list(aln)[0].seq),
b_aln_seq=str(list(aln)[1].seq))
aln.annotations['ssbio_type'] = 'seqalign'
aln.annotations['mutations'] = ssbio.protein.sequence.utils.alignment.get_mutations(aln_df)
aln.annotations['deletions'] = ssbio.protein.sequence.utils.alignment.get_deletions(aln_df)
aln.annotations['insertions'] = ssbio.protein.sequence.utils.alignment.get_insertions(aln_df)
return aln
sequences_rdd = sc.parallelize(filter(lambda x: x.id != self.representative_sequence.id, self.sequences))
result = sequences_rdd.map(lambda x: pairwise_sc(self, x)).collect()
for r in result:
self.sequence_alignments.append(r) | 0.007607 |
def _load_profile_imports(self, symbol_table):
"""
profile_imports is a list of module names or tuples
of (module_name, names to import).
In the form ('.', names) it behaves like:
from . import name1, name2, name3
or, equivalently,
import .name1, .name2, .name3
i.e. each "name" in names becomes ".name"
"""
results = []
if 'profile_imports' not in symbol_table:
return results
package = self._get_package(symbol_table)
profile_imports = symbol_table['profile_imports']
for item in profile_imports:
if isinstance(item, str):
# import the whole module
module_name, names = (item, None)
else:
# expecting a 2 items tuple or list
# import only the names from the module
module_name, names = item
if '.' in module_name and len(set(module_name)) == 1 and names is not None:
# if you execute `from . import mod` from a module in the pkg package
# then you will end up importing pkg.mod
module_names = [f'{module_name}{name}' for name in names]
names = None
else:
module_names = [module_name]
for module_name in module_names:
module = importlib.import_module(module_name, package=package)
if names is None:
results.append(module)
else:
# 1. check if the imported module has an attribute by that name
# 2. if not, attempt to import a submodule with that name
# 3. if the attribute is not found, ImportError is raised.
# …
for name in names:
try:
results.append(getattr(module, name))
except AttributeError:
# attempt to import a submodule with that name
sub_module_name = '.'.join([module_name, name])
sub_module = importlib.import_module(sub_module_name, package=package)
results.append(sub_module)
return results | 0.010703 |
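The special case for entries like ('.', names) just turns each name into a relative module path before importing. A sketch of that rewriting step alone (the module names are hypothetical and nothing is actually imported):

item = ('.', ['models', 'views'])          # hypothetical profile_imports entry
module_name, names = item
if '.' in module_name and len(set(module_name)) == 1 and names is not None:
    module_names = [f'{module_name}{name}' for name in names]
    names = None
else:
    module_names = [module_name]
print(module_names, names)   # ['.models', '.views'] None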
def _char_to_string_binary(c, align=ALIGN.LEFT, padding='-'):
"""
>>> _char_to_string_binary('O', align=ALIGN.LEFT)
'O----------'
>>> _char_to_string_binary('O', align=ALIGN.RIGHT)
'----------O'
>>> _char_to_string_binary('O', align=ALIGN.CENTER)
'-----O-----'
"""
s_bin = mtalk.encode(c, encoding_type='binary', letter_sep=' ')
N = len(s_bin)
if align == ALIGN.LEFT:
s_align = "<"
elif align == ALIGN.RIGHT:
s_align = ">"
elif align == ALIGN.CENTER:
s_align = "^"
else:
raise NotImplementedError("align '%s' not allowed" % align)
s = "{0:" + padding + s_align + str(N) + "}"
return s.format(c) | 0.001445 |
def initmobile_view(request):
"""
Create lazy user with a password. Used from the Android app.
Also returns csrf token.
GET parameters:
username:
user's name
password:
user's password
"""
if 'username' in request.GET and 'password' in request.GET:
username = request.GET['username']
password = request.GET['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
else:
user = request.user
response = {
'username': user.username,
'csrftoken': get_token(request),
}
if not user.has_usable_password():
password = User.objects.make_random_password()
user.set_password(password)
user.save()
response['password'] = password
return HttpResponse(json.dumps(response)) | 0.001065 |
def prompt_4_value(question, choices = None, default = None, display_choices = True, display_indices = False, authorize_list = False, is_question = False, no_confirm = False, required = True, regex = None, regex_format = '', max_laps = 5, input = None, return_index = False):
"""
Prompt for a value
:param question: Question to be asked
:param choices: List of authorized answers
:param default: Value suggested by default
:param display_choices: Display accepted choices
:param display_indices: Display the indice in the list next to the choice
:param authorize_list: Set to true if a list of answers may be accepted
:param is_question: Set to true to append a question mark
:param no_confirm: Set to true to not prompt for a confirmation of the value
:param required: Set to false if an empty answer is authorized
:param regex: TODO
:param regex_format: TODO
:param max_laps: Exit after N laps
:param input: Used for unit testing
:return:
"""
if choices and display_choices and not display_indices:
question = question + ' (' + '/'.join(choices) + ')'
lap_n = 0
while True:
if lap_n >= max_laps:
printError('Automatically aborting prompt loop after 5 failures')
return None
lap_n += 1
can_return = False
# Display the question, choices, and prompt for the answer
if is_question:
question = question + '? '
printError(question)
if choices and display_indices:
for c in choices:
printError('%3d. %s' % (choices.index(c), c))
printError('Enter the number corresponding to your choice: ', False)
choice = prompt(input)
# Set the default value if empty choice
if not choice or choice == '':
if default:
if no_confirm or prompt_4_yes_no('Use the default value (' + default + ')'):
#return default
choice = default
can_return = True
elif not required:
can_return = True
else:
printError('Error: you cannot leave this parameter empty.')
# Validate the value against a whitelist of choices
elif choices:
user_choices = [item.strip() for item in choice.split(',')]
if not authorize_list and len(user_choices) > 1:
printError('Error: multiple values are not supported; please enter a single value.')
else:
choice_valid = True
if display_indices and int(choice) < len(choices):
int_choice = choice
choice = choices[int(choice)]
else:
for c in user_choices:
if not c in choices:
printError('Invalid value (%s).' % c)
choice_valid = False
break
if choice_valid:
can_return = True
# Validate against a regex
elif regex:
if regex.match(choice):
#return choice
can_return = True
else:
printError('Error: expected format is: %s' % regex_format)
else:
# No automated validation, can attempt to return
can_return = True
if can_return:
# Manually confirm that the entered value is correct if needed
if no_confirm or prompt_4_yes_no('You entered "' + choice + '". Is that correct', input=input):
return int(int_choice) if return_index else choice | 0.009772 |
def bulkCmd(snmpDispatcher, authData, transportTarget,
nonRepeaters, maxRepetitions, *varBinds, **options):
"""Creates a generator to perform one or more SNMP GETBULK queries.
On each iteration, new SNMP GETBULK request is send
(:RFC:`1905#section-4.2.3`). The iterator blocks waiting for response
to arrive or error to occur.
Parameters
----------
snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
Class instance representing SNMP engine.
authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
nonRepeaters : int
One MIB variable is requested in response for the first
`nonRepeaters` MIB variables in request.
maxRepetitions : int
`maxRepetitions` MIB variables are requested in response for each
of the remaining MIB variables in the request (e.g. excluding
`nonRepeaters`). Remote SNMP engine may choose lesser value than
requested.
\*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`),
otherwise (if `False`) stop iteration when all response MIB
variables leave the scope of initial MIB variables in
`varBinds`. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
MIB variables (OIDs) are not greater then request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
* `maxRows` - stop iteration once this generator instance processed
`maxRows` of SNMP conceptual table. Default is `0` (no limit).
* `maxCalls` - stop iteration once this generator instance processed
`maxCalls` responses. Default is 0 (no limit).
Yields
------
errorIndication : str
True value indicates SNMP engine error.
errorStatus : str
True value indicates SNMP PDU error.
errorIndex : int
Non-zero value refers to \*varBinds[errorIndex-1]
varBinds: tuple
A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
instances representing MIB variables returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
The `bulkCmd` generator will be exhausted on any of the following
conditions:
* SNMP engine error occurs thus `errorIndication` is `True`
* SNMP PDU `errorStatus` is reported as `True`
* SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
(also known as *SNMP exception values*) are reported for all
MIB variables in `varBinds`
* *lexicographicMode* option is `True` and SNMP agent reports
end-of-mib or *lexicographicMode* is `False` and all
response MIB variables leave the scope of `varBinds`
At any moment a new sequence of `varBinds` could be sent back into the
running generator (supported since Python 2.6).
Setting `maxRepetitions` value to 15..50 might significantly improve
system performance, as many MIB variables get packed into a single
response message at once.
Examples
--------
>>> from pysnmp.hlapi.v1arch import *
>>>
>>> g = bulkCmd(snmpDispatcher(),
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> 0, 25,
>>> ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
>>> next(g)
(None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
>>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
(None, 0, 0, [[(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))]])
"""
def cbFun(*args, **kwargs):
response[:] = args + (kwargs.get('nextVarBinds', ()),)
options['cbFun'] = cbFun
lexicographicMode = options.pop('lexicographicMode', True)
maxRows = options.pop('maxRows', 0)
maxCalls = options.pop('maxCalls', 0)
initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
nullVarBinds = [False] * len(initialVarBinds)
totalRows = totalCalls = 0
errorIndication, errorStatus, errorIndex, varBindTable = None, 0, 0, ()
response = []
stopFlag = False
while not stopFlag:
if not varBinds:
yield (errorIndication, errorStatus, errorIndex, varBinds)
return
if maxRows and totalRows < maxRows:
maxRepetitions = min(maxRepetitions, maxRows - totalRows)
cmdgen.bulkCmd(snmpDispatcher, authData, transportTarget,
nonRepeaters, maxRepetitions,
*[(x[0], Null('')) for x in varBinds], **options)
snmpDispatcher.transportDispatcher.runDispatcher()
errorIndication, errorStatus, errorIndex, varBindTable, varBinds = response
if errorIndication:
yield (errorIndication, errorStatus, errorIndex, ())
return
elif errorStatus:
if errorStatus == 2:
# Hide SNMPv1 noSuchName error which leaks in here
# from SNMPv1 Agent through internal pysnmp proxy.
errorStatus = errorStatus.clone(0)
errorIndex = errorIndex.clone(0)
yield (errorIndication, errorStatus, errorIndex, varBindTable and varBindTable[0] or [])
return
else:
for rowIdx, varBindRow in enumerate(varBindTable):
stopFlag = True
if len(varBindRow) != len(initialVarBinds):
varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
break
for colIdx, varBind in enumerate(varBindRow):
name, val = varBind
if nullVarBinds[colIdx]:
varBindRow[colIdx] = name, endOfMibView
continue
stopFlag = False
if isinstance(val, Null):
nullVarBinds[colIdx] = True
elif not lexicographicMode and not initialVarBinds[colIdx][0].isPrefixOf(name):
varBindRow[colIdx] = name, endOfMibView
nullVarBinds[colIdx] = True
if stopFlag:
varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
break
totalRows += len(varBindTable)
totalCalls += 1
if maxRows and totalRows >= maxRows:
if totalRows > maxRows:
varBindTable = varBindTable[:-(totalRows - maxRows)]
stopFlag = True
if maxCalls and totalCalls >= maxCalls:
stopFlag = True
for varBindRow in varBindTable:
nextVarBinds = (yield errorIndication, errorStatus, errorIndex, varBindRow)
if nextVarBinds:
initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds) | 0.002264 |
async def read(response, loads=loads, encoding=None):
"""
read the data of the response
Parameters
----------
response : aiohttp.ClientResponse
response
loads : callable
json loads function
encoding : :obj:`str`, optional
character encoding of the response, if set to None
aiohttp should guess the right encoding
Returns
-------
:obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list`
the data returned depends on the response
"""
ctype = response.headers.get('Content-Type', "").lower()
try:
if "application/json" in ctype:
logger.info("decoding data as json")
return await response.json(encoding=encoding, loads=loads)
if "text" in ctype:
logger.info("decoding data as text")
return await response.text(encoding=encoding)
except (UnicodeDecodeError, json.JSONDecodeError) as exc:
data = await response.read()
raise exceptions.PeonyDecodeError(response=response,
data=data,
exception=exc)
return await response.read() | 0.000841 |
def possible_params(self):
""" Used when assuming params is a list. """
return self.params if isinstance(self.params, list) else [self.params] | 0.012658 |
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed):
"""Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence"""
if not data_available(data_set):
download_data(data_set)
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'filtered_data.pickle')
# 1. Load the dataset
import pandas as pd
data = pd.read_pickle(filename)
# WARNING: removing year
data.pop('Year')
# Get data matrices
Yall = data.pop('ArrDelay').values[:,None]
Xall = data.values
# Subset the data (memory!!)
all_data = num_train+num_test
Xall = Xall[:all_data]
Yall = Yall[:all_data]
# Get testing points
np.random.seed(seed=seed)
N_shuffled = permute(Yall.shape[0])
train, test = N_shuffled[num_test:], N_shuffled[:num_test]
X, Y = Xall[train], Yall[train]
Xtest, Ytest = Xall[test], Yall[test]
covariates = ['month', 'day of month', 'day of week', 'departure time', 'arrival time', 'air time', 'distance to travel', 'age of aircraft / years']
response = ['delay']
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "Airline delay data used for demonstrating Gaussian processes for big data.", 'covariates': covariates, 'response': response}, data_set) | 0.005755 |
def interactive(self):
"""Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data | 0.003937 |
def __git_commit(git_tag):
"""
Commit files to branch.
``git commit`` exits with 0 on success; in that case this function returns True, otherwise False.
"""
Shell.msg('Commit changes.')
if APISettings.DEBUG:
Shell.debug('Execute "git commit" in dry mode.')
if not call(['git', 'commit', '-m', '\'' + git_tag + '\'', '--dry-run']):
pass
return True
if not call(['git', 'commit', '-m', '\'' + git_tag + '\'']):
return True
return False | 0.005769 |
def set_log_type_name(self, logType, name):
"""
Set a logtype name.
:Parameters:
#. logType (string): A defined logging type.
#. name (string): The logtype new name.
"""
assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" %logType
assert isinstance(name, basestring), "name must be a string"
name = str(name)
self.__logTypeNames[logType] = name | 0.008734 |
def copy(self, other, ignore=None, parameters=None, parameter_names=None,
read_args=None, write_args=None):
"""Copies metadata, info, and samples in this file to another file.
Parameters
----------
other : str or InferenceFile
The file to write to. May be either a string giving a filename,
or an open hdf file. If the former, the file will be opened with
the write attribute (note that if a file already exists with that
name, it will be deleted).
ignore : (list of) strings
Don't copy the given groups. If the samples group is included, no
samples will be copied.
parameters : list of str, optional
List of parameters in the samples group to copy. If None, will copy
all parameters.
parameter_names : dict, optional
Rename one or more parameters to the given name. The dictionary
should map parameter -> parameter name. If None, will just use the
original parameter names.
read_args : dict, optional
Arguments to pass to ``read_samples``.
write_args : dict, optional
Arguments to pass to ``write_samples``.
Returns
-------
InferenceFile
The open file handler to other.
"""
if not isinstance(other, h5py.File):
# check that we're not trying to overwrite this file
if other == self.name:
raise IOError("destination is the same as this file")
other = self.__class__(other, 'w')
# metadata
self.copy_metadata(other)
# info
if ignore is None:
ignore = []
if isinstance(ignore, (str, unicode)):
ignore = [ignore]
self.copy_info(other, ignore=ignore)
# samples
if self.samples_group not in ignore:
self.copy_samples(other, parameters=parameters,
parameter_names=parameter_names,
read_args=read_args,
write_args=write_args)
# if any down selection was done, re-set the default
# thin-start/interval/end
p = self[self.samples_group].keys()[0]
my_shape = self[self.samples_group][p].shape
p = other[other.samples_group].keys()[0]
other_shape = other[other.samples_group][p].shape
if my_shape != other_shape:
other.attrs['thin_start'] = 0
other.attrs['thin_interval'] = 1
other.attrs['thin_end'] = None
return other | 0.001118 |
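A minimal sketch of calling copy() as documented above; the filenames, group name, and parameter names are placeholders, and an importable InferenceFile class is assumed from the surrounding module.

fp = InferenceFile('results.hdf', 'r')            # hypothetical open file
other = fp.copy('results_thinned.hdf',
                ignore=['sampler_info'],          # hypothetical group to skip
                parameters=['mass1', 'mass2'],    # copy only these parameters
                parameter_names={'mass1': 'm1'})  # rename on the way out
other.close()
fp.close()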
def _add_outcome_provenance(self, association, outcome):
"""
:param association: str association curie
:param outcome: dict (json)
:return: None
"""
provenance = Provenance(self.graph)
base = self.curie_map.get_base()
provenance.add_agent_to_graph(base, 'Monarch Initiative')
self.graph.addTriple(association, self.globaltt['asserted_by'], base) | 0.004785 |
def refresh(self):
"""Re-pulls the data from redis"""
redis_key = EXPERIMENT_REDIS_KEY_TEMPLATE % self.experiment.name
self.plays = int(self.experiment.redis.hget(redis_key, "%s:plays" % self.name) or 0)
self.rewards = int(self.experiment.redis.hget(redis_key, "%s:rewards" % self.name) or 0)
self.performance = float(self.rewards) / max(self.plays, 1) | 0.010178 |
def _EnsureFileExists(self):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(self._filename):
old_umask = os.umask(0o177)
try:
open(self._filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True | 0.005141 |
def _dismantle_callsign(self, callsign, timestamp=timestamp_now):
""" try to identify the callsign's identity by analyzing it in the following order:
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Raises:
KeyError: Callsign could not be identified
"""
entire_callsign = callsign.upper()
if re.search('[/A-Z0-9\-]{3,15}', entire_callsign): # make sure the call has at least 3 characters
if re.search('\-\d{1,3}$', entire_callsign): # cut off any -10 / -02 appendixes
callsign = re.sub('\-\d{1,3}$', '', entire_callsign)
if re.search('/[A-Z0-9]{1,4}/[A-Z0-9]{1,4}$', callsign):
callsign = re.sub('/[A-Z0-9]{1,4}$', '', callsign) # cut off 2. appendix DH1TW/HC2/P -> DH1TW/HC2
# multiple character appendix (callsign/xxx)
if re.search('[A-Z0-9]{4,10}/[A-Z0-9]{2,4}$', callsign): # case call/xxx, but ignoring /p and /m or /5
appendix = re.search('/[A-Z0-9]{2,4}$', callsign)
appendix = re.sub('/', '', appendix.group(0))
self._logger.debug("appendix: " + appendix)
                if appendix == 'MM': # special case Maritime Mobile
#self._mm = True
return {
'adif': 999,
'continent': '',
'country': 'MARITIME MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'AM': # special case Aeronautic Mobile
return {
'adif': 998,
'continent': '',
                        'country': 'AIRCRAFT MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'QRP': # special case QRP
callsign = re.sub('/QRP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'QRPP': # special case QRPP
callsign = re.sub('/QRPP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'BCN': # filter all beacons
callsign = re.sub('/BCN', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif appendix == "LH": # Filter all Lighthouses
callsign = re.sub('/LH', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif re.search('[A-Z]{3}', appendix): #case of US county(?) contest N3HBX/UAL
callsign = re.sub('/[A-Z]{3}$', '', callsign)
return self._iterate_prefix(callsign, timestamp)
else:
# check if the appendix is a valid country prefix
return self._iterate_prefix(re.sub('/', '', appendix), timestamp)
# Single character appendix (callsign/x)
elif re.search('/[A-Z0-9]$', callsign): # case call/p or /b /m or /5 etc.
appendix = re.search('/[A-Z0-9]$', callsign)
appendix = re.sub('/', '', appendix.group(0))
if appendix == 'B': # special case Beacon
callsign = re.sub('/B', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif re.search('\d$', appendix):
area_nr = re.search('\d$', appendix).group(0)
callsign = re.sub('/\d$', '', callsign) #remove /number
                    if len(re.findall(r'\d+', callsign)) == 1: # call has just one digit e.g. DH1TW
callsign = re.sub('[\d]+', area_nr, callsign)
else: # call has several digits e.g. 7N4AAL
                        pass # no (two-)digit prefix countries known where the appendix would change the entity
return self._iterate_prefix(callsign, timestamp)
else:
return self._iterate_prefix(callsign, timestamp)
# regular callsigns, without prefix or appendix
elif re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', callsign):
return self._iterate_prefix(callsign, timestamp)
# callsigns with prefixes (xxx/callsign)
elif re.search('^[A-Z0-9]{1,4}/', entire_callsign):
pfx = re.search('^[A-Z0-9]{1,4}/', entire_callsign)
pfx = re.sub('/', '', pfx.group(0))
#make sure that the remaining part is actually a callsign (avoid: OZ/JO81)
rest = re.search('/[A-Z0-9]+', entire_callsign)
rest = re.sub('/', '', rest.group(0))
if re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', rest):
return self._iterate_prefix(pfx)
if entire_callsign in callsign_exceptions:
return self._iterate_prefix(callsign_exceptions[entire_callsign])
self._logger.debug("Could not decode " + callsign)
raise KeyError("Callsign could not be decoded") | 0.007511 |
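A hedged sketch of how the method above behaves from a caller's point of view; `lookup` stands in for an instance of the enclosing class, and the returned keys follow the special-case dictionaries in the code.

try:
    info = lookup._dismantle_callsign('DH1TW/MM')  # maritime-mobile special case
    print(info['country'], info['adif'])           # 'MARITIME MOBILE', 999
except KeyError:
    print('callsign could not be decoded')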
def push_scope(self, callback=None): # noqa
"""Pushes a new layer on the scope stack. Returns a context manager
that should be used to pop the scope again. Alternatively a callback
can be provided that is executed in the context of the scope.
"""
if callback is not None:
with self.push_scope() as scope:
callback(scope)
return None
client, scope = self._stack[-1]
new_layer = (client, copy.copy(scope))
self._stack.append(new_layer)
return _ScopeManager(self) | 0.003466 |
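A short sketch of the two calling styles the docstring above describes; `hub` is assumed to be an instance of the class defining push_scope(), and the tagging call inside the scope is illustrative.

# 1) context-manager form: the scope is popped automatically on exit
with hub.push_scope() as scope:
    scope.set_tag('section', 'checkout')  # assumed scope API

# 2) callback form: the callback runs inside the temporary scope
hub.push_scope(lambda scope: scope.set_tag('section', 'checkout'))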
def equation(self):
"""Mix-in class that returns matrix rows for leaky wall condition.
Qnormal = resfac * (headin - headout)
Returns matrix part (nunknowns,neq)
Returns rhs part nunknowns
"""
mat = np.empty((self.nunknowns, self.model.neq))
rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero
for icp in range(self.ncp):
istart = icp * self.nlayers
ieq = 0
for e in self.model.elementlist:
if e.nunknowns > 0:
qx, qy = e.disvecinflayers(self.xc[icp], self.yc[icp], self.layers)
mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \
qx * self.cosnorm[icp] + qy * self.sinnorm[icp] - self.resfac[:, np.newaxis] * \
(e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aq) / self.aq.Tcol[
self.layers] - \
e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aq) / self.aq.Tcol[
self.layers])
ieq += e.nunknowns
else:
qx, qy = e.disveclayers(self.xc[icp], self.yc[icp], self.layers)
rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp] + self.resfac * \
(e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers,
aq=self.aq) / self.aq.T[self.layers] -
e.potentiallayers(self.xcout[icp], self.ycout[icp],
self.layers, aq=self.aq) / self.aq.T[
self.layers])
return mat, rhs | 0.007645 |
def data_context(fn, mode="r"):
"""
    Return the content of `fn` from the `template_data` directory.
"""
with open(data_context_name(fn), mode) as f:
return f.read() | 0.005348 |
def nexttime(self, lastts):
'''
Returns next timestamp that meets requirements, incrementing by (self.incunit * incval) if not increasing, or
0.0 if there are no future matches
'''
lastdt = datetime.datetime.fromtimestamp(lastts, tz.utc)
newvals = {} # all the new fields that will be changed in datetime of lastts
# Truncate the seconds part
newdt = lastdt.replace(second=0)
for unit, newval in self.reqdict.items():
dtkey = _TimeunitToDatetime[unit]
if unit is TimeUnit.DAYOFWEEK:
newdt = newdt.replace(**newvals)
newvals = {}
newval = newdt.day + (6 + newval - newdt.weekday()) % 7 + 1
if newval > calendar.monthrange(newdt.year, newdt.month)[1]:
newval -= 7
elif unit is TimeUnit.MONTH:
# As we change the month, clamp the day of the month to a valid value
newdt = newdt.replace(**newvals)
newvals = {}
dayval = _dayofmonth(newdt.day, newval, newdt.year)
newvals['day'] = dayval
elif unit is TimeUnit.DAYOFMONTH:
newdt = newdt.replace(**newvals)
newvals = {}
newval = _dayofmonth(newval, newdt.month, newdt.year)
newvals[dtkey] = newval
newdt = newdt.replace(**newvals)
# Then move forward if we have to
if newdt <= lastdt or \
self.incunit == TimeUnit.DAYOFWEEK and newdt.weekday() != self.incval:
if self.incunit is None:
largest_req = min(self.reqdict.keys())
tmpunit = _NextUnitMap[largest_req]
if tmpunit is None: # required a year and we're already there
return 0.0
# Unless we're going to the next day of week, increment by 1 unit of the next larger unit
tmpincval = self.reqdict.get(TimeUnit.DAYOFWEEK, 1)
else:
tmpunit = self.incunit
tmpincval = self.incval
newdt = self._inc(tmpunit, tmpincval, self.reqdict, lastdt, newdt)
assert newdt > lastdt
return newdt.timestamp() | 0.003093 |
def is_logged_in(self, name_id):
""" Check if user is in the cache
:param name_id: The identifier of the subject
"""
identity = self.users.get_identity(name_id)[0]
return bool(identity) | 0.00885 |
def is_group_or_super_group(cls, obj) -> bool:
"""
Check chat is group or super-group
:param obj:
:return:
"""
return cls._check(obj, [cls.GROUP, cls.SUPER_GROUP]) | 0.009434 |
def requeue(self):
"""Loop endlessly and requeue expired jobs."""
job_requeue_interval = float(
self.config.get('sharq', 'job_requeue_interval'))
while True:
self.sq.requeue()
gevent.sleep(job_requeue_interval / 1000.00) | 0.007143 |
def hashes(self, trust_internet=True):
# type: (bool) -> Hashes
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes) | 0.00291 |
def generateIndex(self, refresh=0, refresh_index=0):
"""Writes the index file"""
open(self.index_file, "wt").write(self.renderIndex(refresh=refresh, refresh_index=refresh_index)) | 0.015464 |
def experimentVaryingSynapseSampling(expParams,
sampleSizeDistalList,
sampleSizeProximalList):
"""
  Test multi-column convergence with varying amounts of proximal/distal sampling
:return:
"""
numRpts = 20
df = None
args = []
for sampleSizeProximal in sampleSizeProximalList:
for sampleSizeDistal in sampleSizeDistalList:
for rpt in range(numRpts):
l4Params = getL4Params()
l2Params = getL2Params()
l2Params["sampleSizeProximal"] = sampleSizeProximal
l2Params["minThresholdProximal"] = sampleSizeProximal
l2Params["sampleSizeDistal"] = sampleSizeDistal
l2Params["activationThresholdDistal"] = sampleSizeDistal
args.append(
{
"numObjects": expParams['numObjects'],
"numLocations": expParams['numLocations'],
"numFeatures": expParams['numFeatures'],
"numColumns": expParams['numColumns'],
"trialNum": rpt,
"l4Params": l4Params,
"l2Params": l2Params,
"profile": True,
"objectSeed": rpt,
}
)
pool = Pool(processes=expParams['numWorkers'])
result = pool.map(runExperiment, args)
#
# if df is None:
# df = pd.DataFrame.from_dict(result, orient='index')
# else:
# df = pd.concat([df, pd.DataFrame.from_dict(result, orient='index')], axis=1)
#
# df = df.transpose()
return result | 0.011897 |
def get_header_url(response, header_name):
"""Get a URL from a header requests.
:param requests.Response response: REST call response.
:param str header_name: Header name.
:returns: URL if not None AND valid, None otherwise
"""
url = response.headers.get(header_name)
try:
_validate(url)
except ValueError:
return None
else:
return url | 0.002525 |
def insert_optimization_option_group(parser):
"""
Adds the options used to specify optimization-specific options.
Parameters
----------
parser : object
OptionParser instance
"""
optimization_group = parser.add_argument_group("Options for selecting "
"optimization-specific settings")
optimization_group.add_argument("--cpu-affinity", help="""
A set of CPUs on which to run, specified in a format suitable
to pass to taskset.""")
optimization_group.add_argument("--cpu-affinity-from-env", help="""
        The name of an environment variable containing a set
of CPUs on which to run, specified in a format suitable
to pass to taskset.""") | 0.003699 |
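A minimal sketch of wiring the helper above into a parser; although the docstring mentions OptionParser, the body uses argparse's add_argument_group, so an argparse.ArgumentParser is assumed here.

import argparse

parser = argparse.ArgumentParser()
insert_optimization_option_group(parser)
opts = parser.parse_args(['--cpu-affinity', '0-3'])
print(opts.cpu_affinity)  # '0-3'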
def _load_view(self, template_engine_name, template_dir):
"""
Load view by name and return an instance.
"""
file_name = template_engine_name.lower()
class_name = "{}View".format(template_engine_name.title())
try:
view_module = import_module("rails.views.{}".format(file_name))
except ImportError:
raise Exception("Template engine '{}' not found in 'rails.views'".format(file_name))
view_class = getattr(view_module, class_name)
return view_class(template_dir) | 0.005405 |
def expo(base=2, factor=1, max_value=None):
"""Generator for exponential decay.
Args:
base: The mathematical base of the exponentiation operation
        factor: Factor to multiply the exponentiation by.
max_value: The maximum value to yield. Once the value in the
true exponential sequence exceeds this, the value
of max_value will forever after be yielded.
"""
n = 0
while True:
a = factor * base ** n
if max_value is None or a < max_value:
yield a
n += 1
else:
yield max_value | 0.001664 |
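A quick illustration of the generator above, capping the exponential sequence at 10.

gen = expo(base=2, factor=1, max_value=10)
print([next(gen) for _ in range(6)])  # [1, 2, 4, 8, 10, 10]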
def close_streaming_interface(self):
"""Called when someone closes the streaming interface to the device.
        This method will automatically notify sensor_graph that there is no
        longer a streaming interface open.
"""
super(ReferenceDevice, self).close_streaming_interface()
self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_CLOSED) | 0.005181 |
def get(self, **kwargs):
"""
Queries database for results of view
:return:
"""
db_url = ':'.join([options.url_registry_db, str(options.db_port)])
db = couch.AsyncCouch(db_name=self.db_name, couch_url=db_url)
result = yield db.view(
design_doc_name=self.name,
view_name=self.name,
**kwargs)
raise Return(result) | 0.004854 |
def create(name, url, tournament_type="single elimination", **params):
"""Create a new tournament."""
params.update({
"name": name,
"url": url,
"tournament_type": tournament_type,
})
return api.fetch_and_parse("POST", "tournaments", "tournament", **params) | 0.003367 |
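An illustrative call to create(); the tournament name and URL slug are placeholders, and the module-level api client is assumed to be configured with valid credentials.

tournament = create('Friday Night Cup', 'friday_night_cup',
                    tournament_type='double elimination')
print(tournament)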
def enhex(d, separator=''):
"""
Convert bytes to their hexadecimal representation, optionally joined by a
given separator.
Args:
d(bytes): The data to convert to hexadecimal representation.
separator(str): The separator to insert between hexadecimal tuples.
Returns:
str: The hexadecimal representation of ``d``.
Examples:
>>> from pwny import *
>>> enhex(b'pwnypack')
'70776e797061636b'
>>> enhex(b'pwnypack', separator=' ')
'70 77 6e 79 70 61 63 6b'
"""
v = binascii.hexlify(d).decode('ascii')
if separator:
return separator.join(
v[i:i+2]
for i in range(0, len(v), 2)
)
else:
return v | 0.001346 |
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o | 0.007143 |
def add_subparser(subparsers):
"""Add command line arguments as server subparser.
"""
parser = subparsers.add_parser("server", help="Run a bcbio-nextgen server allowing remote job execution.")
    parser.add_argument("-c", "--config", help=("Global YAML configuration file specifying system details. "
                                                "Defaults to installed bcbio_system.yaml"))
parser.add_argument("-p", "--port", help="Port to listen on (default 8080)",
default=8080, type=int)
parser.add_argument("-n", "--cores", help="Cores to use when processing locally when not requested (default 1)",
default=1, type=int)
parser.add_argument("-d", "--biodata_dir", help="Directory with biological data",
default="/mnt/biodata", type=str)
return parser | 0.008168 |
def add_output_file(self, filename):
"""
Add filename as a output file for this DAG node.
@param filename: output filename to add
"""
if filename not in self.__output_files:
self.__output_files.append(filename)
if not isinstance(self.job(), CondorDAGManJob):
if self.job().get_universe() == 'grid':
self.add_output_macro(filename) | 0.010499 |
def removeComponent(self, row, col):
"""Removes the component at the given location
:param row: track location of existing component to remove
:type row: int
:param col: location in track of existing component to remove
:type col: int
"""
self._segments[row].pop(col)
# If this row is now empty we should remove it?
if self.columnCountForRow(-1) == 0:
self.removeRow(len(self._segments)-1)
# in case of samplerate change, just always update
self.updateCalibration() | 0.005291 |
def estimate_clock_model(params):
"""
implementing treetime clock
"""
if assure_tree(params, tmp_dir='clock_model_tmp'):
return 1
dates = utils.parse_dates(params.dates)
if len(dates)==0:
return 1
outdir = get_outdir(params, '_clock')
###########################################################################
### READ IN VCF
###########################################################################
#sets ref and fixed_pi to None if not VCF
aln, ref, fixed_pi = read_if_vcf(params)
is_vcf = True if ref is not None else False
###########################################################################
### ESTIMATE ROOT (if requested) AND DETERMINE TEMPORAL SIGNAL
###########################################################################
if params.aln is None and params.sequence_length is None:
print("one of arguments '--aln' and '--sequence-length' is required.", file=sys.stderr)
return 1
basename = get_basename(params, outdir)
myTree = TreeTime(dates=dates, tree=params.tree, aln=aln, gtr='JC69',
verbose=params.verbose, seq_len=params.sequence_length,
ref=ref)
myTree.tip_slack=params.tip_slack
if myTree.tree is None:
print("ERROR: tree loading failed. exiting...")
return 1
if params.clock_filter:
n_bad = [n.name for n in myTree.tree.get_terminals() if n.bad_branch]
myTree.clock_filter(n_iqd=params.clock_filter, reroot=params.reroot or 'least-squares')
n_bad_after = [n.name for n in myTree.tree.get_terminals() if n.bad_branch]
if len(n_bad_after)>len(n_bad):
print("The following leaves don't follow a loose clock and "
"will be ignored in rate estimation:\n\t"
+"\n\t".join(set(n_bad_after).difference(n_bad)))
if not params.keep_root:
# reroot to optimal root, this assigns clock_model to myTree
if params.covariation: # this requires branch length estimates
myTree.run(root="least-squares", max_iter=0,
use_covariation=params.covariation)
res = myTree.reroot(params.reroot,
force_positive=not params.allow_negative_rate)
myTree.get_clock_model(covariation=params.covariation)
if res==ttconf.ERROR:
print("ERROR: unknown root or rooting mechanism!\n"
"\tvalid choices are 'least-squares', 'ML', and 'ML-rough'")
return 1
else:
myTree.get_clock_model(covariation=params.covariation)
d2d = utils.DateConversion.from_regression(myTree.clock_model)
print('\n',d2d)
print('The R^2 value indicates the fraction of variation in'
'\nroot-to-tip distance explained by the sampling times.'
          '\nHigher values correspond to more clock-like behavior (max 1.0).')
print('\nThe rate is the slope of the best fit of the date to'
'\nthe root-to-tip distance and provides an estimate of'
'\nthe substitution rate. The rate needs to be positive!'
'\nNegative rates suggest an inappropriate root.\n')
print('\nThe estimated rate and tree correspond to a root date:')
if params.covariation:
reg = myTree.clock_model
dp = np.array([reg['intercept']/reg['slope']**2,-1./reg['slope']])
droot = np.sqrt(reg['cov'][:2,:2].dot(dp).dot(dp))
print('\n--- root-date:\t %3.2f +/- %1.2f (one std-dev)\n\n'%(-d2d.intercept/d2d.clock_rate, droot))
else:
print('\n--- root-date:\t %3.2f\n\n'%(-d2d.intercept/d2d.clock_rate))
if not params.keep_root:
# write rerooted tree to file
outtree_name = basename+'rerooted.newick'
Phylo.write(myTree.tree, outtree_name, 'newick')
print("--- re-rooted tree written to \n\t%s\n"%outtree_name)
table_fname = basename+'rtt.csv'
with open(table_fname, 'w') as ofile:
ofile.write("#name, date, root-to-tip distance\n")
ofile.write("#Dates of nodes that didn't have a specified date are inferred from the root-to-tip regression.\n")
for n in myTree.tree.get_terminals():
if hasattr(n, "raw_date_constraint") and (n.raw_date_constraint is not None):
if np.isscalar(n.raw_date_constraint):
tmp_str = str(n.raw_date_constraint)
elif len(n.raw_date_constraint):
tmp_str = str(n.raw_date_constraint[0])+'-'+str(n.raw_date_constraint[1])
else:
tmp_str = ''
ofile.write("%s, %s, %f\n"%(n.name, tmp_str, n.dist2root))
else:
ofile.write("%s, %f, %f\n"%(n.name, d2d.numdate_from_dist2root(n.dist2root), n.dist2root))
for n in myTree.tree.get_nonterminals(order='preorder'):
ofile.write("%s, %f, %f\n"%(n.name, d2d.numdate_from_dist2root(n.dist2root), n.dist2root))
print("--- wrote dates and root-to-tip distances to \n\t%s\n"%table_fname)
###########################################################################
### PLOT AND SAVE RESULT
###########################################################################
plot_rtt(myTree, outdir+params.plot_rtt)
return 0 | 0.006222 |
def QA_fetch_get_hkindex_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
    # Hong Kong markets (HKMARKET):
    27 5 Hong Kong indices FH
    31 2 Hong Kong Main Board KH
    48 2 Hong Kong GEM (Growth Enterprise Market) KG
    49 2 Hong Kong funds KT
    43 1 B-share to H-share conversion HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==27') | 0.001541 |
def invert(m):
"""
Generate the inverse of a 3x3 matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/invert_c.html
:param m: Matrix to be inverted.
:type m: 3x3-Element Array of floats
:return: Inverted matrix (m1)^-1
:rtype: 3x3-Element Array of floats
"""
m = stypes.toDoubleMatrix(m)
mout = stypes.emptyDoubleMatrix()
libspice.invert_c(m, mout)
return stypes.cMatrixToNumpy(mout) | 0.002252 |
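A small numeric check of invert(): multiplying by the returned matrix should recover the identity. The input matrix is just an example value, and an initialised SPICE toolkit is assumed.

m = [[0.0, -1.0, 0.0],
     [0.5,  0.0, 0.0],
     [0.0,  0.0, 1.0]]
minv = invert(m)
# minv is approximately [[0, 2, 0], [-1, 0, 0], [0, 0, 1]]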
def chi2_adaptive_binning(features_0,features_1,number_of_splits_list,systematics_fraction=0.0,title = "title", name="name", PLOT = True, DEBUG = False, transform='StandardScalar'):
"""This function takes in two 2D arrays with all features being columns"""
max_number_of_splits = np.max(number_of_splits_list)
#determine how many data points are in each sample
no_0=features_0.shape[0]
no_1=features_1.shape[0]
print("features_0.shape : ", features_0.shape)
no_dim = features_0.shape[1]
#Give all samples in file 0 the label 0 and in file 1 the feature 1
label_0=np.zeros((no_0,1))
label_1=np.ones((no_1,1))
#Create an array containing samples and features.
data_0=np.c_[features_0,label_0]
data_1=np.c_[features_1,label_1]
features= np.r_[features_0,features_1]
labels= np.r_[label_0, label_1]
data=np.r_[data_0,data_1]
data_same=np.c_[features,labels]
#print("data : ",data)
#print("data_same : ", data_same)
#print("np.sum(data!=data_same) : ",np.sum(data!=data_same))
assert np.sum(data!=data_same)==0
assert (no_dim == data.shape[1]-1)
if no_dim==2:
plt.scatter(features[:,0],features[:,1], 0.1)
plt.savefig('test.png')
plt.clf()
if transform=='StandardScalar':
features = preprocessing.scale(features)
data = np.c_[features,labels]
if transform=='uniform':
#data_new2 = data[:,0]
data_new = norm_highD_searchsorted(data[:,0])
for D in range(1,no_dim):
temp = norm_highD_searchsorted(data[:,D])
data_new = np.c_[data_new,temp]
#data_new2= np.c_[data_new2,data[:,D]]
data_new = np.c_[data_new, np.r_[label_0,label_1]]
#data_new2= np.c_[data_new2,np.r_[label_0,label_1]]
print("data : ", data)
data = data_new
print("data new : ", data)
#print("data_new2 : ", data_new2)
#print("np.sum(data!=data_new2) : ",np.sum(data!=data_new2))
np.random.shuffle(data)
assert (no_dim == data.shape[1]-1)
labels=data[:,-1]
X_values= data[:,:-1]
X_max = np.amax(data,axis=0)[:-1]
X_min = np.amin(data,axis=0)[:-1]
X_total_width = (np.subtract(X_max,X_min))
del data
if transform=='fill01':
#Scaling
X_values = X_values - X_min[None,:]
X_values = X_values / X_total_width[None,:]
if True:
X_min = [0.]*no_dim
X_total_width = [1.]*no_dim
#b = X_values[:,0]
#print("b[b[:]>2].shape[0] : \n", b[b[:]>2].shape[0] )
data = np.concatenate((X_values, labels[:,None]), axis=1)
if no_dim==2:
plt.scatter(data[:,0],data[:,1],0.1)
plt.savefig('test_scaled.png')
#print("X_values.shape : ",X_values.shape)
starting_boundary = []
for i in range(no_dim):
starting_boundary.append([0.0,1.0])
    #Each key has the following structure: # of splits and, for each split, whether it was closer to (a) or further away from (b) the origin. The original bin is "0"
#For example "2ab" means it is the bin that was closer to the origin for the first split and further away for the second one.
bin_boundaries_dict = {'0' : np.array(starting_boundary)}
bin_points_dict = {'0' : data}
for split_number in range(1,1+max_number_of_splits):
for bin_key, bin_boundary in bin_boundaries_dict.items():
if str(split_number-1) in bin_key:
variances= np.var(bin_points_dict[bin_key][:,:-1], axis=0)
#print("\nvariances : ", variances)
dim_to_be_sliced = np.argmax(variances)
#print("dim_to_be_sliced : ",dim_to_be_sliced)
#print("bin_points_dict[bin_key] : ",bin_points_dict[bin_key])
#print("bin_points_dict[bin_key][:,dim_to_be_sliced] : ",bin_points_dict[bin_key][:,dim_to_be_sliced])
median = np.median(bin_points_dict[bin_key][:,dim_to_be_sliced])
#print("median : ",median)
a_bin_boundary, b_bin_boundary = bin_boundary.copy(), bin_boundary.copy()
#print("a_bin_boundary : ",a_bin_boundary)
a_bin_boundary[dim_to_be_sliced,1] = median
b_bin_boundary[dim_to_be_sliced,0] = median
bin_boundaries_dict[str(split_number)+bin_key[1:]+'a'] = a_bin_boundary
bin_boundaries_dict[str(split_number)+bin_key[1:]+'b'] = b_bin_boundary
a_points, b_points = [],[]
for event_number in range(bin_points_dict[bin_key].shape[0]):
if bin_points_dict[bin_key][event_number,dim_to_be_sliced] < median: a_points.append(bin_points_dict[bin_key][event_number,:].tolist())
else: b_points.append(bin_points_dict[bin_key][event_number,:].tolist())
bin_points_dict[str(split_number)+bin_key[1:]+'a'] = np.array(a_points)
bin_points_dict[str(split_number)+bin_key[1:]+'b'] = np.array(b_points)
#If a bin contains no particles it should be deleted
if len(a_points)==0:
del bin_points_dict[str(split_number)+bin_key[1:]+'a']
del bin_boundaries_dict[str(split_number)+bin_key[1:]+'a']
if len(b_points)==0:
del bin_points_dict[str(split_number)+bin_key[1:]+'b']
del bin_boundaries_dict[str(split_number)+bin_key[1:]+'b']
if PLOT: pickle.dump( bin_boundaries_dict, open( "bin_boundaries_dict.p", "wb" ) )
bins_sample01_dict= {}
signed_Scp2_dict= {}
results_list = []
for number_of_splits in number_of_splits_list:
print("\nnumber_of_splits : ",number_of_splits,"\nsystematics_fraction : ",systematics_fraction)
bins_sample0, bins_sample1 = [] , []
for bin_key, bin_points in bin_points_dict.items():
if str(number_of_splits) in bin_key:
labels_in_bin = bin_points[:,-1]
#print("labels_in_bin : ",labels_in_bin)
bin_sample0 = np.count_nonzero( labels_in_bin == 0)
bin_sample1 = np.count_nonzero( labels_in_bin == 1)
#print("bin_sample0 : ",bin_sample0)
#print("bin_sample1 : ",bin_sample1)
#simulate uncertainties
if(systematics_fraction*float(bin_sample0)!=0.): bin_sample0 += int(round(np.random.normal(0.,systematics_fraction*float(bin_sample0))))
if(systematics_fraction*float(bin_sample1)!=0.): bin_sample1 += int(round(np.random.normal(0.,systematics_fraction*float(bin_sample1))))
bins_sample01_dict[bin_key]=[bin_sample0,bin_sample1]
                signed_Scp2_dict[bin_key] = np.square(float(bin_sample1-bin_sample0))/(float(bin_sample1)+float(bin_sample0)+np.square(float(bin_sample0)*systematics_fraction)+np.square(float(bin_sample1)*systematics_fraction))*np.sign(bin_sample1-bin_sample0) # one systematics term per sample, matching Scp2 below
#print("\n\nbin_sample0 : ",bin_sample0, "\n bins_sample0 : ", bins_sample0 )
#print("type(bin_sample0) : ",type(bin_sample0), " type(bins_sample0) : ",type(bins_sample0))
bins_sample0.append(bin_sample0)
#print(" bins_sample0 : ", bins_sample0, "\n\n" )
bins_sample1.append(bin_sample1)
bins_sample0, bins_sample1 = np.array(bins_sample0,dtype=float), np.array(bins_sample1, dtype=float)
print("bins_sample0 : ",bins_sample0,"\n bins_sample1 : ",bins_sample1)
#element wise subtraction and division
Scp2 = ((bins_sample1-bins_sample0)**2)/ (bins_sample1+bins_sample0+(systematics_fraction*bins_sample1)**2+(systematics_fraction*bins_sample0)**2 )
#Scp2 = np.divide(np.square(np.subtract(bins_sample1,bins_sample0)),np.add(bins_sample1,bins_sample0))
if DEBUG:
print(Scp2)
#nansum ignores all the contributions that are Not A Number (NAN)
Chi2 = np.nansum(Scp2)
if DEBUG:
print("Chi2")
print(Chi2)
dof=bins_sample0.shape[0]-1
pvalue= 1 - stats.chi2.cdf(Chi2,dof)
print("\nThe p value for Scp2 = ",Scp2," and Chi2 = ", Chi2, " is ",pvalue,"\n\n")
if DEBUG:
print(bins_sample0)
print(bins_sample1)
print("Chi2/dof : {0}".format(str(Chi2/dof)))
print("pvalue : {0}".format(str(pvalue)))
results_list.append(pvalue)
if PLOT:
if no_dim==1: chi2_plots.adaptive_binning_1Dplot(bin_boundaries_dict,data,number_of_splits,title+" "+str(no_dim) + "D "+str(number_of_splits)+ " splits ",name+"_"+str(no_dim) + "D_chi2_"+str(number_of_splits)+"_splits")
if no_dim==2: chi2_plots.adaptive_binning_2Dplot(bin_boundaries_dict,signed_Scp2_dict,number_of_splits,X_values,title+" "+str(no_dim) + "D"+str(number_of_splits)+ " splits ",name+"_"+str(no_dim) + "D_chi2_"+str(number_of_splits)+"_splits", X_min= X_min,X_total_width=X_total_width )
if no_dim>1: chi2_plots.adaptive_binning_2D1Dplot(bin_boundaries_dict,bins_sample01_dict,number_of_splits,X_values,title+" "+str(no_dim) + "D"+str(number_of_splits)+ " splits ",name+"_"+str(no_dim) + "D_chi2_"+str(number_of_splits)+"_splits", no_dim)
return results_list | 0.044957 |
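A hedged usage sketch for the function above: two toy 2-D samples drawn from the same distribution should yield large p-values. PLOT is switched off to skip the adaptive-binning plots; all argument values are illustrative.

import numpy as np

rng = np.random.RandomState(0)
sample_a = rng.normal(size=(1000, 2))
sample_b = rng.normal(size=(1000, 2))
pvalues = chi2_adaptive_binning(sample_a, sample_b,
                                number_of_splits_list=[2, 3],
                                systematics_fraction=0.01,
                                PLOT=False)
print(pvalues)  # one p-value per requested number of splits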
def splits(cls, text_field, root='.data', train='wiki.train.tokens',
validation='wiki.valid.tokens', test='wiki.test.tokens',
**kwargs):
"""Create dataset objects for splits of the WikiText-2 dataset.
This is the most flexible way to use the dataset.
Arguments:
text_field: The field that will be used for text data.
root: The root directory that the dataset's zip archive will be
expanded into; therefore the directory in whose wikitext-2
subdirectory the data files will be stored.
train: The filename of the train data. Default: 'wiki.train.tokens'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'wiki.valid.tokens'.
test: The filename of the test data, or None to not load the test
set. Default: 'wiki.test.tokens'.
"""
return super(WikiText2, cls).splits(
root=root, train=train, validation=validation, test=test,
text_field=text_field, **kwargs) | 0.004452 |
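A sketch of calling the classmethod above with a torchtext Field; the exact import path depends on the installed torchtext version, so treat it as an assumption.

from torchtext import data

TEXT = data.Field(lower=True, batch_first=True)
train, valid, test = WikiText2.splits(text_field=TEXT, root='.data')
TEXT.build_vocab(train)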
def CreatePrecisionHelper(cls, precision):
"""Creates a precision helper.
Args:
precision (str): precision of the date and time value, which should
be one of the PRECISION_VALUES in definitions.
Returns:
class: date time precision helper class.
Raises:
ValueError: if the precision value is unsupported.
"""
precision_helper_class = cls._PRECISION_CLASSES.get(precision, None)
if not precision_helper_class:
raise ValueError('Unsupported precision: {0!s}'.format(precision))
return precision_helper_class | 0.003484 |
def get_trace_entity(self):
"""
Return the current trace entity(segment/subsegment). If there is none,
it behaves based on pre-defined ``context_missing`` strategy.
"""
if not getattr(self._local, 'entities', None):
return self.handle_context_missing()
return self._local.entities[-1] | 0.005797 |
def get_db_prep_save(self, value, connection=None):
"""
Returns field's value prepared for saving into a database.
"""
## convert to settings.TIME_ZONE
if value is not None:
if value.tzinfo is None:
value = default_tz.localize(value)
else:
value = value.astimezone(default_tz)
return super(LocalizedDateTimeField, self).get_db_prep_save(value, connection=connection) | 0.008511 |
def update_credit_note_item(self, credit_note_item_id, credit_note_item_dict):
"""
Updates a credit note item
:param credit_note_item_id: the credit note item id
:param credit_note_item_dict: dict
:return: dict
"""
return self._create_put_request(
resource=CREDIT_NOTE_ITEMS,
billomat_id=credit_note_item_id,
send_data=credit_note_item_dict
) | 0.004515 |
def disk_free(path):
"""Return free bytes on partition holding `path`."""
stats = os.statvfs(path)
return stats.f_bavail * stats.f_frsize | 0.006711 |
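A tiny example of the helper above (POSIX only, since it relies on os.statvfs).

free_gib = disk_free('/') / (1024 ** 3)
print('%.1f GiB free on /' % free_gib)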
def ballot_id(self):
"""
str: Ballot ID
"""
self._validate()
self._validate_division(self.division)
self._validate_for_ballot_id()
parts = []
parts.append(self.election_type)
if self.subtype:
parts.append(self.subtype)
if self.organisation:
parts.append(self.organisation)
if self.division:
parts.append(self.division)
if self.contest_type:
parts.append(self.contest_type)
parts.append(self.date)
return ".".join(parts) | 0.003448 |
def _load(self):
"""
Load editable settings from the database and return them as a dict.
Delete any settings from the database that are no longer registered,
and emit a warning if there are settings that are defined in both
settings.py and the database.
"""
from yacms.conf.models import Setting
removed_settings = []
conflicting_settings = []
new_cache = {}
for setting_obj in Setting.objects.all():
# Check that the Setting object corresponds to a setting that has
# been declared in code using ``register_setting()``. If not, add
# it to a list of items to be deleted from the database later.
try:
setting = registry[setting_obj.name]
except KeyError:
removed_settings.append(setting_obj.name)
continue
# Convert a string from the database to the correct Python type.
setting_value = self._to_python(setting, setting_obj.value)
# If a setting is defined both in the database and in settings.py,
# raise a warning and use the value defined in settings.py.
if hasattr(django_settings, setting["name"]):
if setting_value != setting["default"]:
conflicting_settings.append(setting_obj.name)
continue
# If nothing went wrong, use the value from the database!
new_cache[setting["name"]] = setting_value
if removed_settings:
Setting.objects.filter(name__in=removed_settings).delete()
if conflicting_settings:
warn("These settings are defined in both settings.py and "
"the database: %s. The settings.py values will be used."
% ", ".join(conflicting_settings))
return new_cache | 0.001053 |
def get_one(self, criteria):
        ''' Return one item matching the criteria, or None if nothing is found.
        '''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
        except Exception:
return None | 0.017316 |
def get_log_by_name(log_group_name, log_stream_name, out_file=None,
verbose=True):
"""Download a log given the log's group and stream name.
Parameters
----------
log_group_name : str
The name of the log group, e.g. /aws/batch/job.
log_stream_name : str
The name of the log stream, e.g. run_reach_jobdef/default/<UUID>
Returns
-------
lines : list[str]
The lines of the log as a list.
"""
logs = boto3.client('logs')
kwargs = {'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'startFromHead': True}
lines = []
while True:
response = logs.get_log_events(**kwargs)
# If we've gotten all the events already, the nextForwardToken for
# this call will be the same as the last one
if response.get('nextForwardToken') == kwargs.get('nextToken'):
break
else:
events = response.get('events')
if events:
lines += ['%s: %s\n' % (evt['timestamp'], evt['message'])
for evt in events]
kwargs['nextToken'] = response.get('nextForwardToken')
if verbose:
logger.info('%d %s' % (len(lines), lines[-1]))
if out_file:
with open(out_file, 'wt') as f:
for line in lines:
f.write(line)
return lines | 0.000708 |
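A hypothetical call to the helper above; the log group and stream names are placeholders for real AWS Batch identifiers, and valid boto3 credentials are assumed.

lines = get_log_by_name('/aws/batch/job',
                        'run_reach_jobdef/default/0123-example',
                        out_file='job.log', verbose=False)
print('%d log lines downloaded' % len(lines))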