text (string, length 78-104k) | score (float64, 0-0.18) |
---|---|
def _add_fluent_indexes(self):
"""
        Add the index commands fluently specified on columns.
"""
for column in self._columns:
for index in ['primary', 'unique', 'index']:
column_index = column.get(index)
if column_index is True:
getattr(self, index)(column.name)
break
elif column_index:
getattr(self, index)(column.name, column_index)
break | 0.003906 |
def object_build_class(node, member, localname):
"""create astroid for a living class object"""
basenames = [base.__name__ for base in member.__bases__]
return _base_class_object_build(node, member, basenames, localname=localname) | 0.008264 |
def add(self, doc):
"""Add a doc's annotations to the binder for serialization."""
array = doc.to_array(self.attrs)
if len(array.shape) == 1:
array = array.reshape((array.shape[0], 1))
self.tokens.append(array)
spaces = doc.to_array(SPACY)
assert array.shape[0] == spaces.shape[0]
spaces = spaces.reshape((spaces.shape[0], 1))
self.spaces.append(numpy.asarray(spaces, dtype=bool))
self.strings.update(w.text for w in doc) | 0.00396 |
def _serial_connect(self, port, request):
        '''
        Handle connection request.
        Parameters
        ----------
        port : str
            Device name/port.
        request : dict
            Connection parameters with the following keys:
            baudrate : int
                Baud rate such as 9600 or 115200 etc.
            bytesize : str, optional
                Number of data bits.
                Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
                ``'EIGHTBITS'``.
                Default: ``'EIGHTBITS'``
            parity : str, optional
                Enable parity checking.
                Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
                ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
                Default: ``'PARITY_NONE'``
            stopbits : str, optional
                Number of stop bits.
                Possible values: ``'STOPBITS_ONE'``, ``'STOPBITS_ONE_POINT_FIVE'``,
                ``'STOPBITS_TWO'``.
            xonxoff : bool, optional
                Enable software flow control.
                Default: ``False``
            rtscts : bool, optional
                Enable hardware (RTS/CTS) flow control.
                Default: ``False``
            dsrdtr : bool, optional
                Enable hardware (DSR/DTR) flow control.
                Default: ``False``
        '''
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
                if bytesize not in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
                if parity not in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
                if stopbits not in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return | 0.001677 |
def registered_aliases(self):
"""Return the registered aliases exposed in BUILD files.
These returned aliases aren't so useful for actually parsing BUILD files.
They are useful for generating things like http://pantsbuild.github.io/build_dictionary.html.
:returns: A new BuildFileAliases instance containing this BuildConfiguration's registered alias
mappings.
:rtype: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
"""
target_factories_by_alias = self._target_by_alias.copy()
target_factories_by_alias.update(self._target_macro_factory_by_alias)
return BuildFileAliases(
targets=target_factories_by_alias,
objects=self._exposed_object_by_alias.copy(),
context_aware_object_factories=self._exposed_context_aware_object_factory_by_alias.copy()
) | 0.004762 |
def measure_torsion_angles(residues):
"""Calculates the dihedral angles for a list of backbone atoms.
Parameters
----------
residues : [ampal.Residue]
List of `Residue` objects.
Returns
-------
    torsion_angles : [(float, float, float)]
One triple for each residue, containing torsion angles in
the range [-pi, pi].
[0] omega
[1] phi
[2] psi
For the first residue, omega and phi are not defined. For
the final residue, psi is not defined.
    Notes
    -----
    If fewer than two residues are supplied, every returned triple is
    ``(None, None, None)`` instead of raising an error.
"""
if len(residues) < 2:
torsion_angles = [(None, None, None)] * len(residues)
else:
torsion_angles = []
for i in range(len(residues)):
if i == 0:
res1 = residues[i]
res2 = residues[i + 1]
omega = None
phi = None
try:
psi = dihedral(
res1['N']._vector, res1['CA']._vector,
res1['C']._vector, res2['N']._vector)
except KeyError as k:
print("{0} atom missing - can't assign psi".format(k))
psi = None
torsion_angles.append((omega, phi, psi))
elif i == len(residues) - 1:
res1 = residues[i - 1]
res2 = residues[i]
try:
omega = dihedral(
res1['CA']._vector, res1['C']._vector,
res2['N']._vector, res2['CA']._vector)
except KeyError as k:
print("{0} atom missing - can't assign omega".format(k))
omega = None
try:
phi = dihedral(
res1['C']._vector, res2['N']._vector,
res2['CA']._vector, res2['C']._vector)
except KeyError as k:
print("{0} atom missing - can't assign phi".format(k))
phi = None
psi = None
torsion_angles.append((omega, phi, psi))
else:
res1 = residues[i - 1]
res2 = residues[i]
res3 = residues[i + 1]
try:
omega = dihedral(
res1['CA']._vector, res1['C']._vector,
res2['N']._vector, res2['CA']._vector)
except KeyError as k:
print("{0} atom missing - can't assign omega".format(k))
omega = None
try:
phi = dihedral(
res1['C']._vector, res2['N']._vector,
res2['CA']._vector, res2['C']._vector)
except KeyError as k:
print("{0} atom missing - can't assign phi".format(k))
phi = None
try:
psi = dihedral(
res2['N']._vector, res2['CA']._vector,
res2['C']._vector, res3['N']._vector)
except KeyError as k:
print("{0} atom missing - can't assign psi".format(k))
psi = None
torsion_angles.append((omega, phi, psi))
return torsion_angles | 0.000294 |
def make_content_node(channeldir, rel_path, filename, metadata):
"""
    Create ContentNode based on the file extension and metadata provided.
"""
file_key, file_ext = os.path.splitext(filename)
ext = file_ext[1:]
kind = None
if ext in content_kinds.MAPPING:
kind = content_kinds.MAPPING[ext] # guess what kind based on file extension
elif 'questions' in metadata:
kind = content_kinds.EXERCISE
else:
raise ValueError('Could not find kind for extension ' + str(ext) + ' in content_kinds.MAPPING')
# Extract metadata fields
source_id = metadata.get('source_id', None)
if source_id is None:
source_id = metadata['chan_path']
filepath = os.path.join(rel_path, filename)
title = metadata['title']
description = metadata.get('description', None)
author = metadata.get('author', None)
lang = metadata.get('language', None)
license_dict = metadata.get('license', None)
thumbnail_chan_path = metadata.get('thumbnail_chan_path', None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, channeldir)
else:
thumbnail_rel_path = None
if kind == VIDEO_NODE:
content_node = dict(
kind=VIDEO_NODE,
source_id=source_id,
title=title,
author=author,
description=description,
language=lang,
license=license_dict,
derive_thumbnail=True, # video-specific option
thumbnail=thumbnail_rel_path,
files=[{'file_type':VIDEO_FILE, 'path':filepath, 'language':lang}], # ffmpeg_settings={"crf": 24},
)
elif kind == AUDIO_NODE:
content_node = dict(
kind=AUDIO_NODE,
source_id=source_id,
title=title,
author=author,
description=description,
language=lang,
license=license_dict,
thumbnail=thumbnail_rel_path,
files=[{'file_type':AUDIO_FILE, 'path':filepath, 'language':lang}],
)
elif kind == DOCUMENT_NODE:
content_node = dict(
kind=DOCUMENT_NODE,
source_id=source_id,
title=title,
author=author,
description=description,
language=lang,
license=license_dict,
thumbnail=thumbnail_rel_path,
files=[]
)
if ext == 'pdf':
pdf_file = {
'file_type':DOCUMENT_FILE,
'path':filepath,
'language':lang
}
content_node['files'].append(pdf_file)
elif ext == 'epub':
epub_file = {
'file_type':EPUB_FILE,
'path':filepath,
'language':lang
}
content_node['files'].append(epub_file)
else:
raise ValueError('Ext {} not supported for kind {}'.format(ext, kind))
elif kind == HTML5_NODE:
content_node = dict(
kind=HTML5_NODE,
source_id=source_id,
title=title,
author=author,
description=description,
language=lang,
license=license_dict,
thumbnail=thumbnail_rel_path,
files=[{'file_type':HTML5_FILE, 'path':filepath, 'language':lang}],
)
elif kind == EXERCISE_NODE:
content_node = dict(
kind=EXERCISE_NODE,
source_id=source_id,
title=title,
author=author,
description=description,
language=lang,
license=license_dict,
exercise_data=metadata['exercise_data'],
questions=metadata['questions'],
thumbnail=thumbnail_rel_path,
files=[],
)
else:
raise ValueError('Not implemented case for kind ' + str(kind))
return content_node | 0.00609 |
def start_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
label_format = self._get_label_format(workunit)
if label_format == LabelFormat.FULL:
      if WorkUnitLabel.SUPPRESS_LABEL not in workunit.labels:
self._emit_indented_workunit_label(workunit)
# Start output on a new line.
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, '\n'))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit('\n')
elif label_format == LabelFormat.DOT:
self.emit('.')
self.flush() | 0.012431 |
def get_me(self, *args, **kwargs):
"""See :func:`get_me`"""
return get_me(*args, **self._merge_overrides(**kwargs)).run() | 0.014599 |
def any(pred: Callable, xs: Iterable):
"""
Check if at least one element of the iterable `xs`
    fulfills predicate `pred`.
:param pred:
predicate function.
:param xs:
iterable object.
:returns: boolean
"""
b = find_first(pred, xs)
    return b is not None
def partial_ratio(s1, s2):
""""Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
else:
shorter = s2
longer = s1
m = SequenceMatcher(None, shorter, longer)
blocks = m.get_matching_blocks()
# each block represents a sequence of matching characters in a string
# of the form (idx_1, idx_2, len)
# the best partial match will block align with at least one of those blocks
# e.g. shorter = "abcd", longer = XXXbcdeEEE
# block = (1,3,3)
# best score === ratio("abcd", "Xbcd")
scores = []
for block in blocks:
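        # slice a window of the longer string that is aligned with this matching
        # block and has the same length as the shorter string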
long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
long_end = long_start + len(shorter)
long_substr = longer[long_start:long_end]
m2 = SequenceMatcher(None, shorter, long_substr)
r = m2.ratio()
if r > .995:
return 100
else:
scores.append(r)
return utils.intr(100 * max(scores)) | 0.000903 |
def gaps(args):
"""
%prog gaps A_vs_B.blast
    Find distribution of gap sizes between adjacent HSPs.
"""
p = OptionParser(gaps.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
blast = BlastSlow(blastfile)
logging.debug("A total of {} records imported".format(len(blast)))
query_gaps = list(collect_gaps(blast))
subject_gaps = list(collect_gaps(blast, use_subject=True))
logging.debug("Query gaps: {} Subject gaps: {}"\
.format(len(query_gaps), len(subject_gaps)))
from jcvi.graphics.base import savefig
import seaborn as sns
sns.distplot(query_gaps)
savefig("query_gaps.pdf") | 0.004098 |
def append(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists."""
self.__items.append((key, value))
try:
dict_getitem(self, key).append(value)
except KeyError:
dict_setitem(self, key, [value]) | 0.006579 |
def upgradedb(options):
"""
Add 'fake' data migrations for existing tables from legacy GeoNode versions
"""
version = options.get('version')
if version in ['1.1', '1.2']:
sh("python manage.py migrate maps 0001 --fake")
sh("python manage.py migrate avatar 0001 --fake")
elif version is None:
print "Please specify your GeoNode version"
else:
print "Upgrades from version %s are not yet supported." % version | 0.002146 |
def _save_feed(cls, conf, benchmarks, data, revisions, revision_to_hash):
"""
Save the results as an Atom feed
"""
filename = os.path.join(conf.html_dir, 'regressions.xml')
# Determine publication date as the date when the benchmark
# was run --- if it is missing, use the date of the commit
run_timestamps = {}
revision_timestamps = {}
for results in iter_results(conf.results_dir):
if results.commit_hash not in revisions:
# revisions could be filtered when specifying a range
# in 'asv publish'
continue
revision = revisions[results.commit_hash]
revision_timestamps[revision] = results.date
# Time when the benchmark was run
for benchmark_name, timestamp in six.iteritems(results.ended_at):
key = (benchmark_name, revision)
run_timestamps[key] = timestamp
# Fallback to commit date
for benchmark_name in results.get_result_keys(benchmarks):
key = (benchmark_name, revision)
run_timestamps.setdefault(key, results.date)
# Generate feed entries
entries = []
for name, graph_path, graph_params, idx, last_value, best_value, jumps in data:
if '(' in name:
benchmark_name = name[:name.index('(')]
else:
benchmark_name = name
benchmark = benchmarks[benchmark_name]
if idx is not None:
graph_params = dict(graph_params)
# Add URL parameters
param_values, = itertools.islice(itertools.product(*benchmark['params']),
idx, idx + 1)
for k, v in zip(benchmark['param_names'], param_values):
graph_params['p-' + k] = v
for rev1, rev2, value1, value2 in jumps:
timestamps = (run_timestamps[benchmark_name, t] for t in (rev1, rev2) if t is not None)
last_timestamp = max(timestamps)
updated = datetime.datetime.fromtimestamp(last_timestamp/1000)
params = dict(graph_params)
if rev1 is None:
params['commits'] = '{0}'.format(revision_to_hash[rev2])
else:
params['commits'] = '{0}-{1}'.format(revision_to_hash[rev1],
revision_to_hash[rev2])
link = 'index.html#{0}?{1}'.format(benchmark_name, urlencode(params))
try:
best_percentage = "{0:.2f}%".format(100 * (last_value - best_value) / best_value)
except ZeroDivisionError:
best_percentage = "{0:.2g} units".format(last_value - best_value)
try:
percentage = "{0:.2f}%".format(100 * (value2 - value1) / value1)
except ZeroDivisionError:
percentage = "{0:.2g} units".format(value2 - value1)
jump_date = datetime.datetime.fromtimestamp(revision_timestamps[rev2]/1000)
jump_date_str = jump_date.strftime('%Y-%m-%d %H:%M:%S')
if rev1 is not None:
commit_a = revision_to_hash[rev1]
commit_b = revision_to_hash[rev2]
if 'github.com' in conf.show_commit_url:
commit_url = conf.show_commit_url + '../compare/' + commit_a + "..." + commit_b
else:
commit_url = conf.show_commit_url + commit_a
commit_ref = 'in commits <a href="{0}">{1}...{2}</a>'.format(commit_url,
commit_a[:8],
commit_b[:8])
else:
commit_a = revision_to_hash[rev2]
commit_url = conf.show_commit_url + commit_a
commit_ref = 'in commit <a href="{0}">{1}</a>'.format(commit_url, commit_a[:8])
unit = benchmark.get('unit', '')
best_value_str = util.human_value(best_value, unit)
last_value_str = util.human_value(last_value, unit)
value1_str = util.human_value(value1, unit)
value2_str = util.human_value(value2, unit)
title = "{percentage} {name}".format(**locals())
summary = """
<a href="{link}">{percentage} regression</a> on {jump_date_str} {commit_ref}.<br>
New value: {value2_str}, old value: {value1_str}.<br>
Latest value: {last_value_str} ({best_percentage} worse than best value {best_value_str}).
""".format(**locals()).strip()
# Information that uniquely identifies a regression
# --- if the values and the texts change on later
                # runs, feed readers should still identify the regression
# as the same one, as long as the benchmark name and
# commits match.
id_context = [name, revision_to_hash.get(rev1, ""), revision_to_hash.get(rev2, "")]
entries.append(feed.FeedEntry(title, updated, link, summary, id_context))
entries.sort(key=lambda x: x.updated, reverse=True)
feed.write_atom(filename, entries,
title='{0} performance regressions'.format(conf.project),
author='Airspeed Velocity',
address='{0}.asv'.format(conf.project)) | 0.003839 |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ActivityContext for this ActivityInstance
:rtype: twilio.rest.taskrouter.v1.workspace.activity.ActivityContext
"""
if self._context is None:
self._context = ActivityContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
sid=self._solution['sid'],
)
return self._context | 0.006623 |
def tile_to_pixel(tile, centered=False):
"""Transform tile to pixel coordinates"""
pixel = [tile[0] * 256, tile[1] * 256]
if centered:
# should clip on max map size
pixel = [pix + 128 for pix in pixel]
return pixel[0], pixel[1] | 0.007067 |
def _check_ver(pyver, op, wanted):
'''
>>> _check_ver('2.7.15', 'gt', '2.7')
True
>>> _check_ver('2.7.15', 'gt', '2.7.15')
False
>>> _check_ver('2.7.15', 'ge', '2.7.15')
True
>>> _check_ver('2.7.15', 'eq', '2.7.15')
True
'''
pyver = distutils.version.LooseVersion(pyver)
wanted = distutils.version.LooseVersion(wanted)
return getattr(operator, '__{}__'.format(op))(pyver, wanted) | 0.00232 |
def register_module_classes(yaml: ruamel.yaml.YAML, modules: Optional[Iterable[Any]] = None) -> ruamel.yaml.YAML:
""" Register all classes in the given modules with the YAML object.
This is a simple helper function.
"""
# Validation
if modules is None:
modules = []
# Extract the classes from the modules
classes_to_register = set()
for module in modules:
module_classes = [member[1] for member in inspect.getmembers(module, inspect.isclass)]
classes_to_register.update(module_classes)
# Register the extracted classes
return register_classes(yaml = yaml, classes = classes_to_register) | 0.01072 |
def insertOutputModuleConfig(self, remoteConfig, migration=False):
"""
Insert Release version, application, parameter set hashes and the map(output module config).
"""
otptIdList = []
missingList = []
conn = self.dbi.connection()
try:
for c in remoteConfig:
cfgid = self.otptModCfgid.execute(conn, app = c["app_name"],
release_version = c["release_version"],
pset_hash = c["pset_hash"],
output_label = c["output_module_label"],
global_tag=c['global_tag'])
if cfgid <= 0 :
missingList.append(c)
else:
key = (c['app_name'] + ':' + c['release_version'] + ':' +
c['pset_hash'] + ':' +
c['output_module_label'] + ':' + c['global_tag'])
self.datasetCache['conf'][key] = cfgid
otptIdList.append(cfgid)
#print "About to set cfgid: %s" % str(cfgid)
except KeyError as ex:
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \
KeyError exception: %s. " %ex.args[0], self.logger.exception,
"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. " %ex.args[0] )
except Exception as ex:
if conn:conn.close()
raise
if len(missingList)==0:
if conn:conn.close()
return otptIdList
#Now insert the missing configs
try:
#tran = conn.begin()
for m in missingList:
# Start a new transaction
# This is to see if we can get better results
# by committing early if we're submitting
# multiple blocks with similar features
tran = conn.begin()
#Now insert the config
# Sort out the mess
# We're having some problems with different threads
# committing different pieces at the same time
# This makes the output module config ID wrong
# Trying to catch this via exception handling on duplication
# Start a new transaction
#global_tag is now required. YG 03/08/2011
try:
cfgid = 0
if not migration:
m['create_by'] = dbsUtils().getCreateBy()
m['creation_date'] = dbsUtils().getTime()
configObj = {"release_version": m["release_version"],
"pset_hash": m["pset_hash"], "pset_name":m.get('pset_name', None),
"app_name": m["app_name"],
'output_module_label' : m['output_module_label'],
'global_tag' : m['global_tag'],
'scenario' : m.get('scenario', None),
'creation_date' : m['creation_date'],
'create_by':m['create_by']
}
self.otptModCfgin.execute(conn, configObj, tran)
tran.commit()
tran = None
except KeyError as ex:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \
KeyError exception: %s. " %ex.args[0],
self.logger.exception,
"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. " %ex.args[0])
except exceptions.IntegrityError as ex:
#Another job inserted it just 1/100000 second earlier than
#you!! YG 11/17/2010
if str(ex).find("ORA-00001") != -1 or str(ex).lower().find("duplicate") !=-1:
if str(ex).find("TUC_OMC_1") != -1:
#the config is already in db, get the ID later
pass
else:
#reinsert it if one or two or three of the three attributes (vresion, hash and app) are inserted
#just 1/100000 second eailer.
try:
self.otptModCfgin.execute(conn, configObj, tran)
tran.commit()
tran = None
except exceptions.IntegrityError as ex:
if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_OMC_1"))\
or str(ex).lower().find("duplicate") != -1:
pass
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Configure. ',
self.logger.exception,
'Invalid data when insert Configure. '+ str(ex))
elif str(ex).find("ORA-01400") > -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-missing-data", "Missing data when inserting Configure. ",
self.logger.exception, str(ex))
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Configure. ',
self.logger.exception,
'Invalid data when insert Configure. '+ str(ex))
                except Exception as ex3:
if tran:tran.rollback()
if conn:conn.close()
raise ex3
cfgid = self.otptModCfgid.execute(conn,
app = m["app_name"],
release_version = m["release_version"],
pset_hash = m["pset_hash"],
output_label = m["output_module_label"],
global_tag=m['global_tag'])
otptIdList.append(cfgid)
key = (m['app_name'] + ':' + m['release_version'] + ':' +
m['pset_hash'] + ':' +m['output_module_label'] + ':' +
m['global_tag'])
self.datasetCache['conf'][key] = cfgid
finally:
if tran:tran.rollback()
if conn:conn.close()
return otptIdList | 0.016425 |
def load_remote_trajectory(url, format=None):
'''Load a trajectory file from a remote location specified by *url*.
.. seealso:: load_remote_system
'''
from urllib import urlretrieve
filename, headers = urlretrieve(url)
    return load_trajectory(filename, format)
def LSR(value, amount, width):
"""
The ARM LSR (logical shift right) operation.
:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value
:rtype int or BitVec
"""
if amount == 0:
return value
result, _ = LSR_C(value, amount, width)
return result | 0.002404 |
def get_chromosomes(snps):
""" Get the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
list
        list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes
"""
if isinstance(snps, pd.DataFrame):
return list(pd.unique(snps["chrom"]))
else:
return [] | 0.00542 |
def Download(campaign=0, queue='build', email=None, walltime=8, **kwargs):
'''
Submits a cluster job to the build queue to download all TPFs for a given
campaign.
:param int campaign: The `K2` campaign to run
:param str queue: The name of the queue to submit to. Default `build`
:param str email: The email to send job status notifications to. \
Default `None`
:param int walltime: The number of hours to request. Default `8`
'''
# Figure out the subcampaign
if type(campaign) is int:
subcampaign = -1
elif type(campaign) is float:
x, y = divmod(campaign, 1)
campaign = int(x)
subcampaign = round(y * 10)
# Submit the cluster job
pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'download.pbs')
str_w = 'walltime=%d:00:00' % walltime
str_v = 'EVEREST_DAT=%s,CAMPAIGN=%d,SUBCAMPAIGN=%d' % (
EVEREST_DAT, campaign, subcampaign)
if subcampaign == -1:
str_name = 'download_c%02d' % campaign
else:
str_name = 'download_c%02d.%d' % (campaign, subcampaign)
str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log')
qsub_args = ['qsub', pbsfile,
'-q', queue,
'-v', str_v,
'-o', str_out,
'-j', 'oe',
'-N', str_name,
'-l', str_w]
if email is not None:
        qsub_args.extend(['-M', email, '-m', 'ae'])
# Now we submit the job
print("Submitting the job...")
subprocess.call(qsub_args) | 0.000646 |
def query_by_tag(cat_id, kind='1'):
'''
Query recent posts of catalog.
'''
return TabPost.select().join(
TabPost2Tag,
on=(TabPost.uid == TabPost2Tag.post_id)
).where(
(TabPost.kind == kind) &
(TabPost2Tag.tag_id == cat_id)
).order_by(
TabPost.time_create.desc()
) | 0.005263 |
def leaveEvent( self, event ):
"""
Toggles the display for the tracker item.
"""
item = self.trackerItem()
if ( item ):
item.setVisible(False) | 0.03 |
def get_from_sources(self, index, doc_type, document_id):
"""Get source stored locally"""
return self.sources.get(index, {}).get(doc_type, {}).get(document_id, {}) | 0.01676 |
def set_ssl_logging(self, enable=False, func=_ssl_logging_cb):
u''' Enable or disable SSL logging
:param True | False enable: Enable or disable SSL logging
:param func: Callback function for logging
'''
if enable:
SSL_CTX_set_info_callback(self._ctx, func)
else:
SSL_CTX_set_info_callback(self._ctx, 0) | 0.005333 |
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF NSR Volume
Structure.
Parameters:
None.
Returns:
        A string representing this UDF NSR Volume Structure.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure not initialized')
return struct.pack(self.FMT, 0, self.standard_ident, 1, b'\x00' * 2041) | 0.008163 |
def intervals_to_fragment_list(self, text_file, time_values):
"""
Transform a list of at least 4 time values
(corresponding to at least 3 intervals)
into a sync map fragment list and store it internally.
The first interval is a HEAD, the last is a TAIL.
For example:
time_values=[0.000, 1.000, 2.000, 3.456] => [(0.000, 1.000), (1.000, 2.000), (2.000, 3.456)]
:param text_file: the text file containing the text fragments associated
:type text_file: :class:`~aeneas.textfile.TextFile`
:param time_values: the time values
:type time_values: list of :class:`~aeneas.exacttiming.TimeValue`
:raises: TypeError: if ``text_file`` is not an instance of :class:`~aeneas.textfile.TextFile`
or ``time_values`` is not a list
:raises: ValueError: if ``time_values`` has length less than four
"""
if not isinstance(text_file, TextFile):
self.log_exc(u"text_file is not an instance of TextFile", None, True, TypeError)
if not isinstance(time_values, list):
self.log_exc(u"time_values is not a list", None, True, TypeError)
if len(time_values) < 4:
self.log_exc(u"time_values has length < 4", None, True, ValueError)
self.log(u"Converting time values to fragment list...")
begin = time_values[0]
end = time_values[-1]
self.log([u" Creating SyncMapFragmentList with begin %.3f and end %.3f", begin, end])
self.smflist = SyncMapFragmentList(
begin=begin,
end=end,
rconf=self.rconf,
logger=self.logger
)
self.log(u" Creating HEAD fragment")
self.smflist.add(SyncMapFragment(
# NOTE lines and filtered lines MUST be set,
# otherwise some output format might break
# when adding HEAD/TAIL to output
text_fragment=TextFragment(identifier=u"HEAD", lines=[], filtered_lines=[]),
begin=time_values[0],
end=time_values[1],
fragment_type=SyncMapFragment.HEAD
), sort=False)
self.log(u" Creating REGULAR fragments")
# NOTE text_file.fragments() returns a list,
# so we cache a copy here instead of
# calling it once per loop
fragments = text_file.fragments
for i in range(1, len(time_values) - 2):
self.log([u" Adding fragment %d ...", i])
self.smflist.add(SyncMapFragment(
text_fragment=fragments[i - 1],
begin=time_values[i],
end=time_values[i + 1],
fragment_type=SyncMapFragment.REGULAR
), sort=False)
self.log([u" Adding fragment %d ... done", i])
self.log(u" Creating TAIL fragment")
self.smflist.add(SyncMapFragment(
# NOTE lines and filtered lines MUST be set,
# otherwise some output format might break
# when adding HEAD/TAIL to output
text_fragment=TextFragment(identifier=u"TAIL", lines=[], filtered_lines=[]),
begin=time_values[len(time_values) - 2],
end=end,
fragment_type=SyncMapFragment.TAIL
), sort=False)
self.log(u"Converting time values to fragment list... done")
self.log(u"Sorting fragment list...")
self.smflist.sort()
self.log(u"Sorting fragment list... done")
return self.smflist | 0.002545 |
def _convert_date(self, date):
"""Convert '106/05/01' to '2017/05/01'"""
return '/'.join([str(int(date.split('/')[0]) + 1911)] + date.split('/')[1:]) | 0.018182 |
def flush_ct_inventory(self):
"""internal method used only if ct_inventory is enabled
"""
if hasattr(self, '_ct_inventory'):
# skip self from update
self._ct_inventory = None
self.update_view = False
self.save() | 0.007042 |
def build_plane_arrays(x, y, qlist):
"""Build a 2-D array out of data taken in the same plane, for contour
plotting.
"""
if type(qlist) is not list:
return_list = False
qlist = [qlist]
else:
return_list = True
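    # Recover the grid axes: x varies along a row (y fixed at its first value)
    # and y varies along a column (x fixed at its first value).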
xv = x[np.where(y==y[0])[0]]
yv = y[np.where(x==x[0])[0]]
qlistp = []
for n in range(len(qlist)):
qlistp.append(np.zeros((len(yv), len(xv))))
for j in range(len(qlist)):
for n in range(len(yv)):
i = np.where(y==yv[n])[0]
qlistp[j][n,:] = qlist[j][i]
if not return_list:
qlistp = qlistp[0]
return xv, yv, qlistp | 0.007576 |
def display(self):
"""A unicode value with the object's data, to be used for displaying
the object in your application."""
if self.title and self.organization:
disp = self.title + u' at ' + self.organization
else:
disp = self.title or self.organization or None
if disp and self.industry:
if self.date_range is not None:
disp += u' (%s, %d-%d)' % ((self.industry,) + \
self.date_range.years_range)
else:
disp += u' (%s)' % self.industry
else:
disp = ((disp or u'') + u' ' + (self.industry or u'')).strip()
if disp and self.date_range is not None:
disp += u' (%d-%d)' % self.date_range.years_range
return disp | 0.004739 |
def split_sequence_file_on_sample_ids_to_files(seqs,
outdir):
"""Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files
"""
logger = logging.getLogger(__name__)
logger.info('split_sequence_file_on_sample_ids_to_files'
' for file %s into dir %s' % (seqs, outdir))
outputs = {}
for bits in sequence_generator(seqs):
sample = sample_id_from_read_id(bits[0])
if sample not in outputs:
outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
for sample in outputs:
outputs[sample].close()
logger.info('split to %d files' % len(outputs)) | 0.00114 |
def getlist(self, key, default=[]):
"""
Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned.
"""
if key in self:
return [node.value for node in self._map[key]]
return default | 0.006042 |
def _include_pretrained_vocab(self, pretrained_word_dict, candidates):
"""
Include terms available via pretrained embeddings
:param pretrained_word_dict:
:param candidates:
:return:
"""
terms = Counter()
for c in candidates:
for w in c.get_parent().words:
if w in pretrained_word_dict:
terms[w] += 1
list(map(self.word_dict.get, terms)) | 0.004367 |
def _guess_lines(ys, max_lines=50, confidence_minimum=0.0):
"""guesses and returns text inter-line distance, number of lines, y_position of first line"""
ys = ys.astype(numpy.float32)
compactness_list, means_list, diffs, deviations = [], [], [], []
start_n = 1
for k in range(start_n, min(len(ys), max_lines)):
compactness, classified_points, means = cv2.kmeans(data=ys, K=k, bestLabels=None, criteria=(
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10), attempts=2, flags=cv2.KMEANS_PP_CENTERS)
means = numpy.sort(means, axis=0)
means_list.append(means)
compactness_list.append(compactness)
if k < 3:
tmp1 = [1, 2, 500, 550] # forge data for bad clusters
else:
# calculate the center of each cluster. Assuming lines are equally spaced...
tmp1 = numpy.diff(means, axis=0) # diff will be equal or very similar
tmp2 = numpy.std(tmp1) / numpy.mean(means) # so variance is minimal
tmp3 = numpy.sum((tmp1 - numpy.mean(tmp1)) ** 2) # root mean square deviation, more sensitive than std
diffs.append(tmp1)
deviations.append(tmp3)
compactness_list = numpy.diff(
numpy.log(numpy.array(compactness_list) + 0.01)) # sum small amount to avoid log(0)
deviations = numpy.array(deviations[1:])
deviations[0] = numpy.mean(deviations[1:])
compactness_list = (compactness_list - numpy.mean(compactness_list)) / numpy.std(compactness_list)
deviations = (deviations - numpy.mean(deviations)) / numpy.std(deviations)
aglomerated_metric = 0.1 * compactness_list + 0.9 * deviations
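    # +1 compensates for the diff/[1:] trimming above, mapping the argmin
    # back to the matching index in means_list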
i = numpy.argmin(aglomerated_metric) + 1
lines = means_list[i]
# calculate confidence
betterness = numpy.sort(aglomerated_metric, axis=0)
confidence = (betterness[1] - betterness[0]) / (betterness[2] - betterness[1])
if confidence < confidence_minimum:
raise Exception("low confidence")
return lines | 0.006594 |
def from_dict(data, ctx):
"""
Instantiate a new HomeConversions from a dict (generally from loading a
JSON response). The data used to instantiate the HomeConversions is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('accountGain') is not None:
data['accountGain'] = ctx.convert_decimal_number(
data.get('accountGain')
)
if data.get('accountLoss') is not None:
data['accountLoss'] = ctx.convert_decimal_number(
data.get('accountLoss')
)
if data.get('positionValue') is not None:
data['positionValue'] = ctx.convert_decimal_number(
data.get('positionValue')
)
return HomeConversions(**data) | 0.00226 |
def get_move_data(move):
"""Return the index number for the given move name. Check moves.json in the same directory."""
srcpath = path.dirname(__file__)
try:
f = open(path.join(srcpath, 'moves.json'), 'r')
except IOError:
get_moves()
f = open(path.join(srcpath, 'moves.json'), 'r')
finally:
with f:
return json.load(f)[move] | 0.005155 |
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler) | 0.006329 |
def get_user(self):
"""Get the user informations from the server.
:return: a dict with all the informations
:rtype: dict
raises ValueError in case of protocol issues
:Example:
>>> "creationTime": <time>,
>>> "lastUpdateTime": <time>,
>>> "userId": "<email for login>",
>>> "title": 0,
>>> "firstName": "<First>",
>>> "lastName": "<Last>",
>>> "email": "<contact email>",
>>> "phoneNumber": "<phone>",
>>> "mobilePhone": "<mobile>",
>>> "locale": "<two char country code>"
:Warning:
The type and amount of values in the dictionary can change any time.
"""
header = BASE_HEADERS.copy()
header['Cookie'] = self.__cookie
request = requests.get(BASE_URL + 'getEndUser',
headers=header,
timeout=10)
if request.status_code != 200:
self.__logged_in = False
self.login()
            return self.get_user()
try:
result = request.json()
except ValueError:
raise Exception(
"Not a valid result for getEndUser, protocol error!")
return result['endUser'] | 0.001553 |
def dataset_initialize(self, folder):
""" initialize a folder with a a dataset configuration (metadata) file
Parameters
==========
folder: the folder to initialize the metadata file in
"""
if not os.path.isdir(folder):
raise ValueError('Invalid folder: ' + folder)
ref = self.config_values[self.CONFIG_NAME_USER] + '/INSERT_SLUG_HERE'
licenses = []
default_license = {'name': 'CC0-1.0'}
licenses.append(default_license)
meta_data = {
'title': 'INSERT_TITLE_HERE',
'id': ref,
'licenses': licenses
}
meta_file = os.path.join(folder, self.DATASET_METADATA_FILE)
with open(meta_file, 'w') as f:
json.dump(meta_data, f, indent=2)
print('Data package template written to: ' + meta_file)
return meta_file | 0.00222 |
def new_stats_exporter(options=None, interval=None):
"""Get a stats exporter and running transport thread.
Create a new `StackdriverStatsExporter` with the given options and start
periodically exporting stats to stackdriver in the background.
Fall back to default auth if `options` is null. This will raise
`google.auth.exceptions.DefaultCredentialsError` if default credentials
aren't configured.
See `opencensus.metrics.transport.get_exporter_thread` for details on the
transport thread.
:type options: :class:`Options`
:param exporter: Options to pass to the exporter
:type interval: int or float
:param interval: Seconds between export calls.
:rtype: :class:`StackdriverStatsExporter`
:return: The newly-created exporter.
"""
if options is None:
_, project_id = google.auth.default()
options = Options(project_id=project_id)
if str(options.project_id).strip() == "":
raise ValueError(ERROR_BLANK_PROJECT_ID)
ci = client_info.ClientInfo(client_library_version=get_user_agent_slug())
client = monitoring_v3.MetricServiceClient(client_info=ci)
exporter = StackdriverStatsExporter(client=client, options=options)
transport.get_exporter_thread(stats.stats, exporter, interval=interval)
return exporter | 0.000758 |
def status(self):
"""
check the status of the network and the peers
:return: network_height, peer_status
"""
peer = random.choice(self.PEERS)
formatted_peer = 'http://{}:4001'.format(peer)
peerdata = requests.get(url=formatted_peer + '/api/peers/').json()['peers']
peers_status = {}
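        # network height is the highest block height reported by any peer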
networkheight = max([x['height'] for x in peerdata])
for i in peerdata:
if 'http://{}:4001'.format(i['ip']) in self.PEERS:
peers_status.update({i['ip']: {
'height': i['height'],
'status': i['status'],
'version': i['version'],
'delay': i['delay'],
}})
return {
'network_height': networkheight,
'peer_status': peers_status
} | 0.003517 |
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf):
"""add a bunch to model.
abunch usually comes from another idf file
or it can be used to copy within the idf file"""
key = thisbunch.key.upper()
obj = copy.copy(thisbunch.obj)
abunch = obj2bunch(data, commdct, obj)
bunchdt[key].append(abunch)
return abunch | 0.002865 |
def ein(self):
"""Generate a random United States Employer Identification Number (EIN).
        A United States Employer Identification Number (EIN) is
        also known as a Federal Tax Identification Number, and is
used to identify a business entity. EINs follow a format of a
two-digit prefix followed by a hyphen and a seven-digit sequence:
##-######
https://www.irs.gov/businesses/small-businesses-self-employed/employer-id-numbers
"""
# Only certain EIN Prefix values are assigned:
#
# https://www.irs.gov/businesses/small-businesses-self-employed/how-eins-are-assigned-and-valid-ein-prefixes
ein_prefix_choices = [
'01',
'02',
'03',
'04',
'05',
'06',
'10',
'11',
'12',
'13',
'14',
'15',
'16',
'20',
'21',
'22',
'23',
'24',
'25',
'26',
'27',
'30',
'31',
'32',
'33',
'34',
'35',
'36',
'37',
'38',
'39',
'40',
'41',
'42',
'43',
'44',
'45',
'46',
'47',
'48',
'50',
'51',
'52',
'53',
'54',
'55',
'56',
'57',
'58',
'59',
'60',
'61',
'62',
'63',
'64',
'65',
'66',
'67',
'68',
'71',
'72',
'73',
'74',
'75',
'76',
'77',
'80',
'81',
'82',
'83',
'84',
'85',
'86',
'87',
'88',
'90',
'91',
'92',
'93',
'94',
'95',
'98',
'99']
ein_prefix = random.choice(ein_prefix_choices)
sequence = self.random_int(min=0, max=9999999)
ein = "{0:s}-{1:07d}".format(ein_prefix, sequence)
return ein | 0.00125 |
def discover_yaml(bank=None, **meta):
"""Discovers the YAML format and registers it if available.
Install YAML support via PIP::
pip install PyYAML
:param bank: The format bank to register the format in
:param meta: Extra information associated with the format
"""
try:
import yaml
if bank is None:
bank = default_bank
bank.register('yaml', yaml.load, yaml.dump, **meta)
except ImportError:
pass | 0.002092 |
def abspath(*path):
"""A method to determine absolute path for a given relative path to the
directory where this setup.py script is located"""
setup_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(setup_dir, *path) | 0.003984 |
def inasafe_place_value_name(number, feature, parent):
"""Given a number, it will return the place value name.
For instance:
* inasafe_place_value_name(10) -> Ten \n
* inasafe_place_value_name(1700) -> Thousand
It needs to be used with inasafe_place_value_coefficient.
"""
_ = feature, parent # NOQA
if number is None:
return None
rounded_number = round_affected_number(
number,
use_rounding=True,
use_population_rounding=True
)
value, unit = denomination(rounded_number, 1000)
if not unit:
return None
else:
return unit['name'] | 0.00158 |
def dashboard(self, request):
"Basic dashboard panel"
# TODO: these should be ajax
module_set = []
for namespace, module in self.get_modules():
home_url = module.get_home_url(request)
if hasattr(module, 'render_on_dashboard'):
# Show by default, unless a permission is required
if not module.permission or request.user.has_perm(module.permission):
module_set.append((module.get_dashboard_title(), module.render_on_dashboard(request), home_url))
return self.render_to_response('nexus/dashboard.html', {
'module_set': module_set,
}, request) | 0.005908 |
def sv_variants(store, institute_obj, case_obj, variants_query, page=1, per_page=50):
"""Pre-process list of SV variants."""
skip_count = (per_page * max(page - 1, 0))
    more_variants = variants_query.count() > (skip_count + per_page)
genome_build = case_obj.get('genome_build', '37')
if genome_build not in ['37','38']:
genome_build = '37'
return {
'variants': (parse_variant(store, institute_obj, case_obj, variant, genome_build=genome_build) for variant in
variants_query.skip(skip_count).limit(per_page)),
'more_variants': more_variants,
} | 0.007874 |
def set_exception(self, exception):
"""Sets the exception on the future."""
if not self.done():
raise TransferNotDoneError(
'set_exception can only be called once the transfer is '
'complete.')
self._coordinator.set_exception(exception, override=True) | 0.00627 |
def zSetSurfaceData(self, surfNum, radius=None, thick=None, material=None, semidia=None,
conic=None, comment=None):
"""Sets surface data"""
if self.pMode == 0: # Sequential mode
surf = self.pLDE.GetSurfaceAt(surfNum)
if radius is not None:
surf.pRadius = radius
if thick is not None:
surf.pThickness = thick
if material is not None:
surf.pMaterial = material
if semidia is not None:
surf.pSemiDiameter = semidia
if conic is not None:
surf.pConic = conic
if comment is not None:
surf.pComment = comment
else:
raise NotImplementedError('Function not implemented for non-sequential mode') | 0.008475 |
def get_sampletypes(self):
"""Returns the available SampleTypes of the system
"""
query = {
"portal_type": "SampleType",
"sort_on": "sortable_title",
"sort_order": "ascending",
"is_active": True,
}
results = api.search(query, "bika_setup_catalog")
return map(api.get_object, results) | 0.005277 |
def parse_list_line_windows(self, b):
"""
Parsing Microsoft Windows `dir` output
:param b: response line
:type b: :py:class:`bytes` or :py:class:`str`
:return: (path, info)
:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)
"""
line = b.decode(encoding=self.encoding).rstrip("\r\n")
date_time_end = line.index("M")
date_time_str = line[:date_time_end + 1].strip().split(" ")
date_time_str = " ".join([x for x in date_time_str if len(x) > 0])
line = line[date_time_end + 1:].lstrip()
with setlocale("C"):
strptime = datetime.datetime.strptime
date_time = strptime(date_time_str, "%m/%d/%Y %I:%M %p")
info = {}
info["modify"] = self.format_date_time(date_time)
next_space = line.index(" ")
if line.startswith("<DIR>"):
info["type"] = "dir"
else:
info["type"] = "file"
info["size"] = line[:next_space].replace(",", "")
if not info["size"].isdigit():
raise ValueError
# This here could cause a problem if a filename started with
# whitespace, but if we were to try to detect such a condition
# we would have to make strong assumptions about the input format
filename = line[next_space:].lstrip()
if filename == "." or filename == "..":
raise ValueError
return pathlib.PurePosixPath(filename), info | 0.001335 |
def epcrparse(self):
"""
Run BLAST, and record results to the object
"""
from Bio.Blast.Applications import NcbiblastnCommandline
while True:
sample, record, line = self.epcrparsequeue.get()
# Split the data on tabs
gene, chromosome, strand, start, end, m_match, gaps, act_len_exp_len = line.split('\t')
# Extract the gene sequence from the contigs
# The record dictionary has the contig name, and the sequence. Splice out the data using the start and
# end coordinates specified by ePCR
genesequence = record[chromosome][int(start) - 1:int(end)]
# Set up BLASTn using blastn-short, as the probe sequences tend to be very short
blastn = NcbiblastnCommandline(db=sample[self.analysistype].probes.split('.')[0],
num_threads=12,
task='blastn-short',
num_alignments=1,
outfmt="'6 qseqid sseqid positive mismatch gaps "
"evalue bitscore slen length'")
# Run the BLASTn, with the gene sequence as stdin
out, err = blastn(stdin=genesequence)
# Split the output string on tabs
results = out.rstrip().split('\t')
# Populate the raw blast results
sample[self.analysistype].rawblastresults[gene] = results
# Create named variables from the list
positives = float(results[2])
mismatches = float(results[3])
gaps = float(results[4])
subjectlength = float(results[7])
# Calculate the percent identity
percentidentity = float('{:0.2f}'.format((positives - gaps) / subjectlength * 100))
# Create a dictionary with the desired values to store in the metadata object
resultdict = {
'matches': positives,
'mismatches': mismatches,
'gaps': gaps,
'subject_length': subjectlength,
'percent_identity': percentidentity,
'match_length': results[8].split('\n')[0]
}
# Populate the metadata object with the dictionary
sample[self.analysistype].blastresults[gene] = resultdict
self.epcrparsequeue.task_done() | 0.004042 |
def _BuildQuery(self,
subject,
attribute=None,
timestamp=None,
limit=None,
is_prefix=False):
"""Build the SELECT query to be executed."""
args = []
subject = utils.SmartUnicode(subject)
criteria = "WHERE aff4.subject_hash=unhex(md5(%s))"
args.append(subject)
sorting = ""
tables = "FROM aff4"
# Set fields, tables, and criteria and append args
if attribute is not None:
if is_prefix:
tables += " JOIN attributes ON aff4.attribute_hash=attributes.hash"
prefix = attribute + "%"
criteria += " AND attributes.attribute like %s"
args.append(prefix)
else:
criteria += " AND aff4.attribute_hash=unhex(md5(%s))"
args.append(attribute)
# Limit to time range if specified
if isinstance(timestamp, (tuple, list)):
criteria += " AND aff4.timestamp >= %s AND aff4.timestamp <= %s"
args.append(int(timestamp[0]))
args.append(int(timestamp[1]))
fields = "aff4.value, aff4.timestamp"
if is_prefix:
fields += ", attributes.attribute"
# Modify fields and sorting for timestamps.
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
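      # Keep only the newest value per attribute by joining against a subquery
      # that selects MAX(timestamp) for each attribute_hash.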
tables += (" JOIN (SELECT attribute_hash, MAX(timestamp) timestamp "
"%s %s GROUP BY attribute_hash) maxtime ON "
"aff4.attribute_hash=maxtime.attribute_hash AND "
"aff4.timestamp=maxtime.timestamp") % (tables, criteria)
criteria = "WHERE aff4.subject_hash=unhex(md5(%s))"
args.append(subject)
else:
# Always order results.
sorting = "ORDER BY aff4.timestamp DESC"
# Add limit if set.
if limit:
sorting += " LIMIT %s" % int(limit)
query = " ".join(["SELECT", fields, tables, criteria, sorting])
return (query, args) | 0.009554 |
def _parse_file(self):
"""Preprocess and parse C file into an AST"""
# We need to set the CPU type to pull in the right register definitions
# only preprocess the file (-E) and get rid of gcc extensions that aren't
# supported in ISO C.
args = utilities.build_includes(self.arch.includes())
# args.append('-mcpu=%s' % self.arch.property('chip'))
args.append('-E')
args.append('-D__attribute__(x)=')
args.append('-D__extension__=')
self.ast = parse_file(self.filepath, use_cpp=True, cpp_path='arm-none-eabi-gcc', cpp_args=args) | 0.006579 |
def get_layergroup(self, name, workspace=None):
'''
returns a single layergroup object.
Will return None if no layergroup is found.
Will raise an error if more than one layergroup with the same name is found.
'''
layergroups = self.get_layergroups(names=name, workspaces=workspace)
return self._return_first_item(layergroups) | 0.007732 |
def get_document_field_display(self, field_name, field):
""" Render a link to a document """
document = getattr(self.instance, field_name)
if document:
return mark_safe(
'<a href="%s">%s <span class="meta">(%s, %s)</span></a>' % (
document.url,
document.title,
document.file_extension.upper(),
filesizeformat(document.file.size),
)
)
return self.model_admin.get_empty_value_display() | 0.00363 |
def read_table(self):
"""
Read an AMQP table, and return as a Python dictionary.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise UnicodeDecodeError if the text is mal-formed.
Will raise struct.error if the data is malformed
"""
# Only need to check underflow on the table once
tlen = self.read_long()
self._check_underflow(tlen)
end_pos = self._pos + tlen
result = {}
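        # Read (short-string field name, typed value) pairs until the table's
        # declared byte length has been consumed.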
while self._pos < end_pos:
name = self._field_shortstr()
result[name] = self._read_field()
return result | 0.003155 |
def add_effect(self, effect):
"""
Add an Effect to the Frame.
:param effect: The Effect to be added.
"""
effect.register_scene(self._scene)
self._effects.append(effect) | 0.009217 |
def compute_pblum_scale(self, dataset, pblum, **kwargs):
"""
intensities should already be computed for this dataset at the time for which pblum is being provided
TODO: add documentation
"""
logger.debug("{}.compute_pblum_scale(dataset={}, pblum={})".format(self.component, dataset, pblum))
abs_luminosity = self.compute_luminosity(dataset, **kwargs)
# We now want to remember the scale for all intensities such that the
# luminosity in relative units gives the provided pblum
pblum_scale = pblum / abs_luminosity
self.set_pblum_scale(dataset, pblum_scale) | 0.00625 |
def parse(schema):
"""
Parse `schema`, either a string or a file-like object, and
return a :class:`MessageRegistry` with the loaded messages.
"""
if not isinstance(schema, basestring):
# assume it is file-like
schema = schema.read()
message = re.compile(r'^\(([^,]+),\s*(\d+)\):\s*$')
field = re.compile(r'^-\s*([^:]+):\s+(.+?)\s*$')
registry = MessageRegistry({})
messages = registry.messages
curr = None
names = None
for lineno, line in enumerate(schema.split('\n')):
line = line.strip()
if '#' in line:
line = line[:line.index('#')]
if line == '':
continue
f = field.match(line)
if f:
if curr is None:
raise ParseError(
'field definition outside of message at line %d' % lineno)
name = f.group(1)
type = f.group(2)
if name not in names:
f = Field(curr, name, type)
curr.fields.append(f)
names.add(name)
continue
else:
raise ParseError(
'duplicate field name "%s" at line %d' % (name, lineno))
m = message.match(line)
if m:
# new message definition
name, vers = m.group(1), int(m.group(2))
if (name, vers) in messages:
raise ParseError('Duplicate message (%s, %d)' % (name, vers))
curr = messages[(name, vers)] = Message(registry, name, vers, [])
names = set()
continue
for message in registry.messages.values():
message.fields = tuple(message.fields)
return registry | 0.000582 |
def report_exception(self, http_context=None, user=None):
""" Reports the details of the latest exceptions to Stackdriver Error
Reporting.
:type http_context: :class`google.cloud.error_reporting.HTTPContext`
:param http_context: The HTTP request which was processed when the
error was triggered.
:type user: str
:param user: The user who caused or was affected by the crash. This
can be a user ID, an email address, or an arbitrary
token that uniquely identifies the user. When sending an
error report, leave this field empty if the user was
not logged in. In this case the Error Reporting system
will use other data, such as remote IP address,
to distinguish affected users.
Example::
>>> try:
>>> raise NameError
>>> except Exception:
>>> client.report_exception()
"""
self._send_error_report(
traceback.format_exc(), http_context=http_context, user=user
) | 0.001631 |
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d | 0.005025 |
def draw(board, term, cells):
"""Draw a board to the terminal."""
for (x, y), state in board.iteritems():
with term.location(x, y):
print cells[state], | 0.005587 |
def glob_all(folder: str, filt: str) -> List[str]:
"""Recursive glob"""
import os
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, filt):
matches.append(os.path.join(root, filename))
return matches | 0.003205 |
def get_sorted_nts_omit_section(self, hdrgo_prt, hdrgo_sort):
"""Return a flat list of sections (wo/section names) with GO terms grouped and sorted."""
nts_flat = []
# print("SSSS SorterNts:get_sorted_nts_omit_section(hdrgo_prt={}, hdrgo_sort={})".format(
# hdrgo_prt, hdrgo_sort))
hdrgos_seen = set()
hdrgos_actual = self.sortgos.grprobj.get_hdrgos()
for _, section_hdrgos_all in self.sections:
#section_hdrgos_act = set(section_hdrgos_all).intersection(hdrgos_actual)
section_hdrgos_act = [h for h in section_hdrgos_all if h in hdrgos_actual]
hdrgos_seen |= set(section_hdrgos_act)
self.sortgos.get_sorted_hdrgo2usrgos(
section_hdrgos_act, nts_flat, hdrgo_prt, hdrgo_sort)
remaining_hdrgos = set(self.sortgos.grprobj.get_hdrgos()).difference(hdrgos_seen)
self.sortgos.get_sorted_hdrgo2usrgos(remaining_hdrgos, nts_flat, hdrgo_prt, hdrgo_sort)
return nts_flat | 0.008929 |
def _get_distance_segment_coefficients(self, rval):
"""
Returns the coefficients describing the distance attenuation shape
for three different distance bins, equations 12a - 12c
"""
# Get distance segment ends
nsites = len(rval)
# Equation 12a
f_0 = np.log10(self.CONSTS["r0"] / rval)
f_0[rval > self.CONSTS["r0"]] = 0.0
# Equation 12b
f_1 = np.log10(rval)
f_1[rval > self.CONSTS["r1"]] = np.log10(self.CONSTS["r1"])
# Equation 12c
f_2 = np.log10(rval / self.CONSTS["r2"])
f_2[rval <= self.CONSTS["r2"]] = 0.0
return f_0, f_1, f_2 | 0.003035 |
def find_focusable(node):
"""
Search for the first focusable window within the node tree
"""
if not node.children:
return node
if node.focus:
return find_focusable(node.children_dict[node.focus[0]]) | 0.004237 |
def reformat(self, dtstring, before, after):
"""Edit the time string format.
See https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
for all format string options.
        Convert a datetime string from one format to another.
"""
a_datetime = datetime.strptime(dtstring, before)
return datetime.strftime(a_datetime, after) | 0.014354 |
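A self-contained sketch of the same strptime/strftime round trip the method performs; the timestamp and format strings are illustrative.

from datetime import datetime

parsed = datetime.strptime('2014-01-15 17:58:31', '%Y-%m-%d %H:%M:%S')
print(datetime.strftime(parsed, '%d/%m/%Y %H:%M'))   # 15/01/2014 17:58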
def FromBinary(cls, record_data, record_count=1):
"""Create an UpdateRecord subclass from binary record data.
This should be called with a binary record blob (NOT including the
record type header) and it will decode it into a SetGraphOnlineRecord.
Args:
record_data (bytearray): The raw record data that we wish to parse
into an UpdateRecord subclass NOT including its 8 byte record header.
record_count (int): The number of records included in record_data.
Raises:
ArgumentError: If the record_data is malformed and cannot be parsed.
Returns:
            SetGraphOnlineRecord: The decoded set graph online record.
"""
_cmd, address, _resp_length, payload = cls._parse_rpc_info(record_data)
try:
online, = struct.unpack("<H", payload)
online = bool(online)
except ValueError:
raise ArgumentError("Could not decode payload for set_online record", payload=payload)
return SetGraphOnlineRecord(online, address) | 0.004608 |
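A minimal sketch of the payload layout the record above assumes: a little-endian unsigned 16-bit flag coerced to bool.

import struct

payload = struct.pack('<H', 1)            # encode an "online = True" flag
online, = struct.unpack('<H', payload)    # -> 1
print(bool(online))                       # True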
def set_log_level(self):
"""
Set log level according to command-line options
@returns: logger object
"""
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
elif self.options.quiet:
self.logger.setLevel(logging.ERROR)
else:
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
return self.logger | 0.004525 |
def next(self):
"""Trigger next agent to :py:meth:`~creamas.core.CreativeAgent.act` in
the current step.
"""
# all agents acted, init next step
t = time.time()
if len(self._agents_to_act) == 0:
self._init_step()
addr = self._agents_to_act.pop(0)
aiomas.run(until=self.env.trigger_act(addr=addr))
t2 = time.time()
self._step_processing_time += t2 - t
# all agents acted, finalize current step
if len(self._agents_to_act) == 0:
self._finalize_step() | 0.003515 |
def tcache(parser, token):
"""
    This will cache the contents of a template fragment for a given amount
    of time, with support for tags.
Usage::
{% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
.. some expensive processing ..
{% endtcache %}
This tag also supports varying by a list of arguments:
{% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
.. some expensive processing ..
{% endtcache %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endtcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise template.TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
tags = None
if len(tokens) > 3 and 'tags=' in tokens[-1]:
tags = parser.compile_filter(tokens[-1][5:])
del tokens[-1]
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]],
tags
) | 0.003428 |
def merge(self, other_tc):
"""
Return the top-left ``<w:tc>`` element of a new span formed by
merging the rectangular region defined by using this tc element and
*other_tc* as diagonal corners.
"""
top, left, height, width = self._span_dimensions(other_tc)
top_tc = self._tbl.tr_lst[top].tc_at_grid_col(left)
top_tc._grow_to(width, height)
return top_tc | 0.004706 |
def unbroadcast(a, b):
'''
unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
have been reshaped such that arithmetic numpy operations such as aa * bb will result in
row-wise operation instead of column-wise broadcasting.
'''
# they could be sparse:
spa = sps.issparse(a)
spb = sps.issparse(b)
if spa and spb: return (a,b)
elif spa or spb:
def fix(sp,nm):
nm = np.asarray(nm)
dnm = len(nm.shape)
nnm = np.prod(nm.shape)
# if we have (sparse matrix) * (high-dim array), unbroadcast the dense array
if dnm == 0: return (sp, np.reshape(nm, (1, 1)))
elif dnm == 1: return (sp, np.reshape(nm, (nnm, 1)))
elif dnm == 2: return (sp, nm)
else: return unbroadcast(sp.toarray(), nm)
return fix(a, b) if spa else tuple(reversed(fix(b, a)))
# okay, no sparse matrices found:
a = np.asarray(a)
b = np.asarray(b)
da = len(a.shape)
db = len(b.shape)
if da > db: return (a, np.reshape(b, b.shape + tuple(np.ones(da-db, dtype=np.int))))
elif da < db: return (np.reshape(a, a.shape + tuple(np.ones(db-da, dtype=np.int))), b)
else: return (a, b) | 0.017161 |
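A self-contained sketch of the reshape trick the helper implements for dense arrays: pad the lower-dimensional operand with trailing singleton axes so arithmetic aligns row-wise instead of broadcasting column-wise.

import numpy as np

a = np.array([1.0, 2.0, 3.0])                        # shape (3,)
b = np.ones((3, 4))                                  # shape (3, 4)
aa = a.reshape(a.shape + (1,) * (b.ndim - a.ndim))   # shape (3, 1)
print((aa * b)[:, 0])                                # [1. 2. 3.] -- each row scaled by a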
def iterRun(self, sqlTail = '', raw = False) :
"""Compile filters and run the query and returns an iterator. This much more efficient for large data sets but
you get the results one element at a time. One thing to keep in mind is that this function keeps the cursor open, that means that the sqlite databae is locked (no updates/inserts etc...) until all
the elements have been fetched. For batch updates to the database, preload the results into a list using get, then do you updates.
You can use sqlTail to add things such as order by
If raw, returns the raw tuple data (not wrapped into a raba object)"""
sql, sqlValues = self.getSQLQuery()
cur = self.con.execute('%s %s'% (sql, sqlTail), sqlValues)
for v in cur :
if not raw :
yield RabaPupa(self.rabaClass, v[0])
else :
yield v | 0.03198 |
def add_path_with_storage_account(self, remote_path, storage_account):
# type: (SourcePath, str, str) -> None
"""Add a path with an associated storage account
:param SourcePath self: this
:param str remote_path: remote path
:param str storage_account: storage account to associate with path
"""
if len(self._path_map) >= 1:
raise RuntimeError(
'cannot add multiple remote paths to SourcePath objects')
rpath = blobxfer.util.normalize_azure_path(remote_path)
self.add_path(rpath)
self._path_map[rpath] = storage_account | 0.004792 |
def _create_embedded_indices(self):
'''
Create indices for all the embedded structs. For parser internal use.
'''
try:
self._target._embedded_indices.update(((k,(self,v)) for k,v in getattr(self._parser.typedef, 'inline_names', {}).items()))
except AttributeError:
pass | 0.018018 |
def __add_tier(self, tier, token_tier_name):
"""
adds a tier to the document graph (either as additional attributes
to the token nodes or as a span node with outgoing edges to the token
nodes it represents)
"""
if tier.attrib['category'] == token_tier_name:
self.__add_tokens(tier)
else:
if self.is_token_annotation_tier(tier):
self.__add_token_annotation_tier(tier)
else:
self.__add_span_tier(tier) | 0.003817 |
def unbind(self, *args):
"""Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
        Multiple arguments can be given, each of which can be either the method
        that was used for the original call to :meth:`bind` or an instance
        object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
"""
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg) | 0.00597 |
def accumulator(init, update):
"""
Generic accumulator function.
.. code-block:: python
        # Simplest Form
        >>> functools.reduce(accumulator, ['this', ' ', 'that'], '')
        'this that'
        # The type of the initial value determines the output type:
        # an int initializer accumulates lengths instead of concatenating.
        >>> functools.reduce(accumulator, ['this', ' ', 'that'], 0)
        9
:param init: Initial Value
:param update: Value to accumulate
:return: Combined Values
"""
return (
init + len(update)
if isinstance(init, int) else
init + update
) | 0.003008 |
def handshake_timed_out(self):
"""
Checks if the handshake has timed out.
If `start_handshake` wasn't called before the call to this function,
the return value will always be `False`. If the handshake completed
before a timeout was reached, the return value will be `False`
:return: handshake time out status, as a `bool`
"""
if not self.__timer:
return False
if self.__handshake_complete:
return False
return self.__timer_expired | 0.003731 |
def get_action(self):
"""Returns the action to be taken from the request. Returns None if no
action is found
"""
action = self.request.get("workflow_action_id", None)
action = self.request.get("workflow_action", action)
if not action:
return None
# A condition in the form causes Plone to sometimes send two actions
# This usually happens when the previous action was not managed properly
# and the request was not able to complete, so the previous form value
# is kept, together with the new one.
if type(action) in (list, tuple):
actions = list(set(action))
if len(actions) > 0:
logger.warn("Multiple actions in request: {}. Fallback to '{}'"
.format(repr(actions), actions[-1]))
action = actions[-1]
return action | 0.003315 |
def ceil(x, context=None):
"""
Return the next higher or equal integer to x.
If the result is not exactly representable, it will be rounded according to
the current context. Note that the rounding step means that it's possible
for the result to be smaller than ``x``. For example::
>>> x = 2**100 + 1
>>> ceil(2**100 + 1) >= x
False
One way to be sure of getting a result that's greater than or equal to
``x`` is to use the ``RoundTowardPositive`` rounding mode::
>>> with RoundTowardPositive:
... x = 2**100 + 1
... ceil(x) >= x
...
True
Similar comments apply to the :func:`floor`, :func:`round` and
:func:`trunc` functions.
.. note::
This function corresponds to the MPFR function ``mpfr_rint_ceil``,
not to ``mpfr_ceil``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_rint_ceil,
(BigFloat._implicit_convert(x),),
context,
) | 0.000973 |
def _relevant_checkers(self, path):
"""
Get set of checkers for the given path.
TODO: currently this is based off the file extension. We would like to
honor magic bits as well, so that python binaries, shell scripts, etc
but we're not guaranteed that `path` currently exists on the filesystem
-- e.g. when version control for historical revs is used.
"""
_, ext = os.path.splitext(path)
ext = ext.lstrip('.')
return checkers.checkers.get(ext, []) | 0.003788 |
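A standalone sketch of the extension-based lookup described above, using a hypothetical checkers mapping rather than the module's real registry.

import os

CHECKERS_BY_EXT = {'py': ['pylint'], 'sh': ['shellcheck']}   # assumed mapping

def relevant_checkers(path):
    # look up checkers by the file's extension, without the leading dot
    _, ext = os.path.splitext(path)
    return CHECKERS_BY_EXT.get(ext.lstrip('.'), [])

print(relevant_checkers('scripts/build.sh'))   # ['shellcheck']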
def authenticate(self):
"""Authenticate user by any means and return either true or false.
Args:
Returns:
tuple (is_valid, username): True is valid user, False if not
"""
basic_auth = request.authorization
is_valid = False
user = None
if basic_auth:
is_valid, user = self.check_basic_auth(
basic_auth.username, basic_auth.password
)
else: # Try token auth
token = request.headers.get('Authorization', None)
param_token = request.args.get('access_token')
if token or param_token:
if token:
# slice the 'token ' piece of the header (following
# github style):
token = token[6:]
else:
# Grab it from query dict instead
token = param_token
log.debug('Received token: %s', token)
is_valid, user = self.check_token_auth(token)
return (is_valid, user) | 0.001852 |
def bulk_update(object_list, ignore_errors=False, delete_first=False, verbosity=0):
    '''Bulk_create the objects in the provided list of model instances, then delete the database rows for the original pks in the object list.
Returns any delta in the number of rows in the database table that resulted from the update.
If nonzero, an error has likely occurred and database integrity is suspect.
# delete_first = True is required if your model has unique constraints that would be violated by creating duplicate records
# FIXME: check for unique constraints and raise exception if any exist (won't work because new objects may violate!)
'''
if not object_list:
return 0
model = object_list[0].__class__
N_before = model.objects.count()
pks_to_delete = set()
for i, obj in enumerate(object_list):
pks_to_delete.add(obj.pk)
if delete_first:
object_list[i] = deepcopy(obj)
object_list[i].pk = None
if verbosity > 1:
print 'Creating %d %r objects.' % (len(object_list), model)
print 'BEFORE: %d' % model.objects.count()
if not delete_first:
model.objects.bulk_create(object_list)
if verbosity > 0:
print 'Deleting %d objects with pks: %r ........' % (len(pks_to_delete), pks_to_delete)
objs_to_delete = model.objects.filter(pk__in=pks_to_delete)
num_to_delete = objs_to_delete.count()
if num_to_delete != len(pks_to_delete):
msg = 'Attempt to delete redundant pks (len %d)! Queryset has count %d. Query was `filter(pk__in=%r). Queryset = %r' % (
len(pks_to_delete), num_to_delete, pks_to_delete, objs_to_delete)
if ignore_errors:
if verbosity > 0:
print msg
else:
raise RuntimeError(msg)
if verbosity > 1:
print 'Queryset to delete has %d objects' % objs_to_delete.count()
objs_to_delete.delete()
if delete_first:
model.objects.bulk_create(object_list)
if verbosity > 1:
print 'AFTER: %d' % model.objects.count()
N_after = model.objects.count()
if ignore_errors:
if verbosity > 1:
print 'AFTER: %d' % N_after
else:
if N_after != N_before:
print 'Number of records in %r changed by %d during bulk_create of %r.\n ' % (model, N_after - N_before, object_list)
msg = 'Records before and after bulk_create are not equal!!! Before=%d, After=%d' % (N_before, N_after)
raise RuntimeError(msg)
return N_before - N_after | 0.003956 |
def run(cmd, output_fpath=None, input_fpath=None, checks=None, stdout_to_outputfile=True,
stdout_tx=True, reuse=False, env_vars=None):
"""Run the provided command, logging details and checking for errors.
"""
if output_fpath and reuse:
if verify_file(output_fpath, silent=True):
info(output_fpath + ' exists, reusing')
return output_fpath
if not output_fpath.endswith('.gz') and verify_file(output_fpath + '.gz', silent=True):
info(output_fpath + '.gz exists, reusing')
return output_fpath
env = os.environ.copy()
if env_vars:
for k, v in env_vars.items():
if v is None:
if k in env:
del env[k]
else:
env[k] = v
if checks is None:
checks = [file_nonempty_check]
def _try_run(_cmd, _output_fpath, _input_fpath):
try:
info(' '.join(str(x) for x in _cmd) if not isinstance(_cmd, six.string_types) else _cmd)
_do_run(_cmd, checks, env, _output_fpath, _input_fpath)
        except:
            # remove any partial output file before re-raising
            if output_fpath and isfile(output_fpath):
                os.remove(output_fpath)
            raise
if output_fpath:
if stdout_tx:
with file_transaction(None, output_fpath) as tx_out_file:
if stdout_to_outputfile:
cmd += ' > ' + tx_out_file
else:
cmd += '\n'
cmd = cmd.replace(' ' + output_fpath + ' ', ' ' + tx_out_file + ' ') \
.replace(' "' + output_fpath + '" ', ' ' + tx_out_file + '" ') \
.replace(' \'' + output_fpath + '\' ', ' ' + tx_out_file + '\' ') \
.replace(' ' + output_fpath + '\n', ' ' + tx_out_file) \
.replace(' "' + output_fpath + '"\n', ' ' + tx_out_file + '"') \
.replace(' \'' + output_fpath + '\'\n', ' ' + tx_out_file + '\'') \
.replace('\n', '')
_try_run(cmd, tx_out_file, input_fpath)
else:
_try_run(cmd, output_fpath, input_fpath)
else:
_try_run(cmd, None, input_fpath) | 0.004891 |
def externals_finder(dirname, filename):
"""Find any 'svn:externals' directories"""
found = False
f = open(filename,'rt')
for line in iter(f.readline, ''): # can't use direct iter!
parts = line.split()
if len(parts)==2:
kind,length = parts
data = f.read(int(length))
if kind=='K' and data=='svn:externals':
found = True
elif kind=='V' and found:
f.close()
break
else:
f.close()
return
for line in data.splitlines():
parts = line.split()
if parts:
yield joinpath(dirname, parts[0]) | 0.010542 |
def select(self, *args, **kwargs):
"""Python3 raises `ValueError` if socket is closed, because fd == -1"""
try:
return super(PatroniSequentialThreadingHandler, self).select(*args, **kwargs)
except ValueError as e:
raise select.error(9, str(e)) | 0.010309 |
def process_edge_flow(self, source, sink, i, j, algo, q):
'''
API: process_edge_flow(self, source, sink, i, j, algo, q)
Description:
        Used by the max_flow_preflowpush() method. Processes edges during
        preflow push.
Input:
source: Source node name of flow graph.
sink: Sink node name of flow graph.
i: Source node in the processed edge (tail of arc).
j: Sink node in the processed edge (head of arc).
Post:
The 'flow' and 'excess' attributes of nodes may get updated.
Return:
Returns False if residual capacity is 0, True otherwise.
'''
if (self.get_node_attr(i, 'distance') !=
self.get_node_attr(j, 'distance') + 1):
return False
if (i, j) in self.edge_attr:
edge = (i, j)
capacity = self.get_edge_attr(i, j, 'capacity')
mult = 1
else:
edge = (j, i)
capacity = 0
mult = -1
flow = mult*self.edge_attr[edge]['flow']
residual_capacity = capacity - flow
if residual_capacity == 0:
return False
excess_i = self.get_node_attr(i, 'excess')
excess_j = self.get_node_attr(j, 'excess')
push_amount = min(excess_i, residual_capacity)
self.edge_attr[edge]['flow'] = mult*(flow + push_amount)
self.set_node_attr(i, 'excess', excess_i - push_amount)
self.set_node_attr(j, 'excess', excess_j + push_amount)
return True | 0.001928 |
def usage(self, subcommand):
"""
Returns *how to use command* text.
"""
usage = ' '.join(['%prog', subcommand, '[options]'])
if self.args:
usage = '%s %s' % (usage, str(self.args))
return usage | 0.007905 |
def long2ip(l):
"""Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
return '%d.%d.%d.%d' % (
l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255) | 0.001514 |
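For reference, a hedged sketch of the inverse conversion (not part of the row above): pack four octets back into a 32-bit integer with the same shift arithmetic.

def ip2long_sketch(ip):
    # '127.0.0.1' -> 2130706433
    a, b, c, d = (int(octet) for octet in ip.split('.'))
    return (a << 24) | (b << 16) | (c << 8) | d

print(ip2long_sketch('127.0.0.1'))   # 2130706433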
def prune_urls(url_set, start_url, allowed_list, ignored_list):
"""Prunes URLs that should be ignored."""
result = set()
for url in url_set:
allowed = False
for allow_url in allowed_list:
if url.startswith(allow_url):
allowed = True
break
if not allowed:
continue
ignored = False
for ignore_url in ignored_list:
if url.startswith(ignore_url):
ignored = True
break
if ignored:
continue
prefix, suffix = (url.rsplit('.', 1) + [''])[:2]
if suffix.lower() in IGNORE_SUFFIXES:
continue
result.add(url)
return result | 0.001372 |
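A usage sketch, assuming prune_urls above is importable and that IGNORE_SUFFIXES (defined elsewhere in that module) does not contain 'html'.

urls = {
    'http://example.com/a.html',
    'http://example.com/img/logo.png',
    'http://other.org/b.html',
}
print(prune_urls(urls, 'http://example.com/',
                 allowed_list=['http://example.com/'],
                 ignored_list=['http://example.com/img/']))
# {'http://example.com/a.html'}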
def clean_traceback(tb):
    '''Fixes up the traceback to remove from the file paths the part
    preceding the project root.
@param tb: C{str}
@rtype: C{str}'''
prefix = __file__[:__file__.find("feat/common/error.py")]
regex = re.compile("(\s*File\s*\")(%s)([a-zA-Z-_\. \\/]*)(\".*)"
% prefix.replace("\\", "\\\\"))
def cleanup(line):
m = regex.match(line)
if m:
return m.group(1) + ".../" + m.group(3) + m.group(4)
else:
return line
return '\n'.join(map(cleanup, tb.split('\n'))) | 0.006861 |
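A standalone sketch of the same idea with a hypothetical project prefix: shorten absolute paths in traceback 'File "..."' lines to '.../relative/path'.

import re

PREFIX = "/home/user/project/"                     # assumed project root, illustration only
regex = re.compile(r'(\s*File\s*")' + re.escape(PREFIX) + r'([^"]*)(".*)')

line = '  File "/home/user/project/pkg/module.py", line 10, in foo'
m = regex.match(line)
print(m.group(1) + ".../" + m.group(2) + m.group(3))
#   File ".../pkg/module.py", line 10, in foo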
def page_factory(request):
""" Page factory.
Config models example:
.. code-block:: python
models = {
'': [WebPage, CatalogResource],
'catalogue': CatalogResource,
'news': NewsResource,
}
"""
prefix = request.matchdict['prefix'] # /{prefix}/page1/page2/page3...
settings = request.registry.settings
dbsession = settings[CONFIG_DBSESSION]
config = settings[CONFIG_MODELS]
if prefix not in config:
# prepend {prefix} to *traverse
request.matchdict['traverse'] =\
tuple([prefix] + list(request.matchdict['traverse']))
prefix = None
# Get all resources and models from config with the same prefix.
resources = config.get(
prefix, config.get( # 1. get resources with prefix same as URL prefix
'', config.get( # 2. if not, then try to get empty prefix
'/', None))) # 3. else try to get prefix '/' otherwise None
if not hasattr(resources, '__iter__'):
resources = (resources, )
tree = {}
if not resources:
return tree
# Add top level nodes of resources in the tree
for resource in resources:
table = None
if not hasattr(resource, '__table__')\
and hasattr(resource, 'model'):
table = resource.model
else:
table = resource
if not hasattr(table, 'slug'):
continue
nodes = dbsession.query(table)
if hasattr(table, 'parent_id'):
nodes = nodes.filter(or_(
table.parent_id == None, # noqa
table.parent.has(table.slug == '/')
))
for node in nodes:
if not node.slug:
continue
resource = resource_of_node(resources, node)
tree[node.slug] = resource(node, prefix=prefix)
return tree | 0.000524 |