text (string, 78 to 104k characters) | score (float64, 0 to 0.18) |
---|---|
def json_serial(obj):
"""
Custom JSON serializer for objects not serializable by default.
"""
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
raise TypeError('Type {} not serializable.'.format(type(obj))) | 0.00692 |
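A minimal usage sketch for the serializer above (assuming json_serial is in scope): json.dumps passes any object it cannot encode natively to the default hook.
import datetime
import json

payload = {"created": datetime.datetime(2020, 1, 2, 3, 4, 5)}
# The default hook converts the datetime into an ISO-8601 string.
print(json.dumps(payload, default=json_serial))  # {"created": "2020-01-02T03:04:05"}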
def build_module(
name, source=None, *, sources=None, preprocess=None, output=None, output_dir='.',
build_dir='build', include_dirs=None, library_dirs=None, libraries=None, macros=None,
compiler_preargs=None, compiler_postargs=None, linker_preargs=None, linker_postargs=None, cache=True):
'''
Args:
name (str): The module name (must be unique).
source (str): the source code in C++.
Keyword Args:
sources (list): Source files.
preprocess (list): Source files that need cfly's preprocessing.
output (str): The output file. defaults to '{name}.{ext}'.
output_dir (str): The output directory. defaults to '.'.
build_dir (str): The build directory. defaults to 'build'.
include_dirs (list): Additional include directories.
library_dirs (list): Additional library directories.
libraries (list): Additional libraries.
macros (list): Predefined macros. (name, value) pairs.
compiler_preargs (list): Compiler preargs.
compiler_postargs (list): Compiler postargs.
linker_preargs (list): Linker preargs.
linker_postargs (list): Linker postargs.
cache (bool): Enable cache.
Returns:
the compiled and imported module.
'''
if output is None:
output = name + get_config_var('EXT_SUFFIX')
if sources is None:
sources = []
if preprocess is None:
preprocess = []
if source is None:
source = ''
preprocess_set = set(preprocess)
sources = [x for x in sources if x not in preprocess_set]
if source and sources + preprocess:
raise ValueError('invalid arguments')
os.makedirs(build_dir, exist_ok=True)
module_home = os.path.join(build_dir, 'temp', name)
old_checksum = readall(module_home, 'args.txt')
checksum = args_checksum(
name,
source,
sources,
preprocess,
output,
output_dir,
build_dir,
include_dirs,
library_dirs,
libraries,
macros,
compiler_preargs,
compiler_postargs,
linker_preargs,
linker_postargs,
)
if checksum != old_checksum:
cache = False
output = os.path.join(output_dir, output)
if cache and is_up_to_date(output, sources + preprocess):
return load_module(name, output)
with open(os.path.join(build_dir, name + '.log'), 'wb+') as build_log:
shutil.rmtree(module_home, ignore_errors=True)
os.makedirs(module_home, exist_ok=True)
if source:
preprocess = [writeall(module_home, 'source.cpp', source)]
global_module_methods = {}
global_module_types = {}
sources = [(x, None) for x in sources]
for filename in preprocess:
source = readall('.', filename)
module_methods, module_types = parse_source(source, build_log)
global_module_methods.update(module_methods)
global_module_types.update(module_types)
code = render_template(source_template, module=name, types=module_types, methods=module_methods)
sources.append((writeall(module_home, filename, source + code), filename))
code = render_template(module_template, module=name, types=global_module_types, methods=global_module_methods)
sources.append((writeall(module_home, 'module.cpp', code), None))
exports = ['PyInit_' + name]
compiler = create_compiler()
def spawn(cmd):
old_path = os.getenv('path', '')
try:
os.environ['path'] = getattr(compiler, '_paths', old_path)
ret = subprocess.call(cmd, stdout=build_log, stderr=subprocess.STDOUT)
if ret:
build_log.seek(0)
entire_log = build_log.read().decode()
raise DistutilsExecError('Compiler failed:\n' + entire_log)
finally:
build_log.flush()
os.environ['path'] = old_path
compiler.spawn = spawn
for include_dir in include_dirs or []:
compiler.add_include_dir(include_dir)
for library_dir in library_dirs or []:
compiler.add_library_dir(library_dir)
try:
objects = compiler.object_filenames([source for source, original in sources], 0, build_dir)
todo = []
for pair, obj in zip(sources, objects):
source, original = pair
if not is_up_to_date(obj, [original or source]):
todo.append(pair)
if not cache:
todo = sources
if todo:
for source, original in todo:
original_folder = [os.path.abspath(os.path.dirname(original))] if original else []
compiler.compile(
[source],
build_dir,
macros,
original_folder,
0,
compiler_preargs,
compiler_postargs,
)
compiler.link(
'shared_object',
objects,
'output',
build_dir,
libraries,
[],
[],
exports,
0,
linker_preargs,
linker_postargs,
)
if os.path.isfile(output):
try:
os.unlink(output)
except PermissionError:
shutil.move(output, os.path.join(build_dir, '_' + os.urandom(8).hex()))
shutil.move(os.path.join(build_dir, 'output'), output)
except CompileError as ex:
raise ex from None
writeall(module_home, 'args.txt', checksum)
return load_module(name, output) | 0.001779 |
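A hypothetical invocation of build_module, shown only to illustrate the keyword API described in the docstring; the file name is a placeholder and the C++ source itself must carry the markup that parse_source() expects, which is not shown here.
# Hypothetical call; 'my_module.cpp' and its contents are placeholders.
my_module = build_module('my_module', source=open('my_module.cpp').read(), build_dir='build', output_dir='.')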
def _get_prog_memory(resources, cores_per_job):
"""Get expected memory usage, in Gb per core, for a program from resource specification.
"""
out = None
for jvm_opt in resources.get("jvm_opts", []):
if jvm_opt.startswith("-Xmx"):
out = _str_memory_to_gb(jvm_opt[4:])
memory = resources.get("memory")
if memory:
out = _str_memory_to_gb(memory)
prog_cores = resources.get("cores")
# if a single core with memory is requested for the job
# and we run multiple cores, scale down to avoid overscheduling
if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores):
out = out / float(cores_per_job)
return out | 0.004255 |
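A hedged worked example (the values are hypothetical, and it assumes _str_memory_to_gb('4g') returns 4.0): a program that requests -Xmx4g on a single core, run inside a 4-core job, is scaled down to 1 Gb per core.
resources = {"jvm_opts": ["-Xmx4g"], "cores": 1}
print(_get_prog_memory(resources, cores_per_job=4))  # expected 1.0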
def add_marker(self, marker):
"""
Adds the marker to the panel.
:param marker: Marker to add
:type marker: pyqode.core.modes.Marker
"""
self._markers.append(marker)
doc = self.editor.document()
assert isinstance(doc, QtGui.QTextDocument)
block = doc.findBlockByLineNumber(marker._position)
marker.block = block
d = TextDecoration(block)
d.set_full_width()
if self._background:
d.set_background(QtGui.QBrush(self._background))
marker.decoration = d
self.editor.decorations.append(d)
self.repaint() | 0.00312 |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._target_url is not None:
return False
if self._category is not None:
return False
if self._event_type is not None:
return False
if self._object_ is not None:
return False
return True | 0.005602 |
def newton_refine2(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 5)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
ax.axis("scaled")
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
save_image(ax.figure, "newton_refine2.png") | 0.00177 |
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_applications()
self._find_usage_application_versions()
self._find_usage_environments()
self._have_usage = True
logger.debug("Done checking usage.") | 0.003454 |
def namify(root_uri):
'''Turns a root uri into a less noisy representation that will probably
make sense in most circumstances. Used by Navigator's __repr__, but can be
overridden if the Navigator is created with a 'name' parameter.'''
root_uri = unidecode.unidecode(decode(unquote(root_uri), 'utf-8'))
generic_domains = set(['localhost', 'herokuapp', 'appspot'])
urlp = urlparse.urlparse(fix_scheme(root_uri))
formatargs = collections.defaultdict(list)
netloc = urlp.netloc.lower()
if ']' in netloc:
domain = netloc.rsplit(']:', 1)[0] # don't need port
elif ':' in netloc:
domain = netloc.rsplit(':', 1)[0] # don't need port
else:
domain = netloc
if not translate(domain,"abcdef:.[]").isdigit():
if '.' in domain:
domain, tld = domain.rsplit('.', 1)
else:
tld = ''
if '.' in domain:
subdomain, domain = domain.rsplit('.', 1)
else:
subdomain = ''
if subdomain != 'www':
formatargs['subdomain'] = subdomain.split('.')
if domain not in generic_domains:
formatargs['domain'].append(domain)
if len(tld) == 2:
formatargs['tld'].append(tld.upper())
elif tld != 'com':
formatargs['tld'].append(tld)
formatargs['path'].extend(p for p in urlp.path.lower().split('/') if p)
formatargs['qargs'].extend(r for q in urlp.query.split(',')
for r in q.split('=') if q and r)
def capify(s):
'''Capitalizes the first letter of a string, but doesn't downcase the
rest like .title()'''
return s if not s else s[0].upper() + s[1:]
def piece_filter(piece):
if piece.lower() == 'api':
formatargs['api'] = True
return ''
elif re.match(r'v[\d.]+', piece):
formatargs['version'].extend(['.', piece])
return ''
elif 'api' in piece:
return piece.replace('api', 'API')
else:
return piece
chain = itertools.chain
pieces = map(capify, map(piece_filter, chain(
formatargs['subdomain'],
formatargs['domain'],
formatargs['tld'],
formatargs['path'],
formatargs['qargs'],
)))
return '{pieces}{api}{vrsn}'.format(pieces=''.join(pieces),
api='API' if formatargs['api'] else '',
vrsn=''.join(formatargs['version']),
) | 0.001183 |
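A rough usage sketch; the exact output depends on fix_scheme() and the other helpers, but tracing the code above, an API-style URL should collapse to a compact camel-cased name roughly like the comment suggests.
print(namify('http://api.github.com/v3'))  # expected to be something like 'GithubAPI.v3'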
def config_ref_role(name, rawtext, text, lineno, inliner,
options=None, content=None):
"""Process a role that references the target nodes created by the
``lsst-config-topic`` directive.
Parameters
----------
name
The role name used in the document.
rawtext
The entire markup snippet, with role.
text
The text marked with the role.
lineno
The line number where ``rawtext`` appears in the input.
inliner
The inliner instance that called us.
options
Directive options for customization.
content
The directive content for customization.
Returns
-------
nodes : `list`
List of nodes to insert into the document.
messages : `list`
List of system messages.
See also
--------
`format_config_id`
`ConfigTopicTargetDirective`
`pending_config_xref`
"""
node = pending_config_xref(rawsource=text)
return [node], [] | 0.001011 |
def set_servo_angle(self, goalangle, goaltime, led):
""" Sets the servo angle (in degrees)
Enable torque using torque_on function before calling this
Args:
goalangle (int): The desired angle in degrees, range -150 to 150
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
if (self.servomodel==0x06) or (self.servomodel == 0x04):
goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)
else:
goalposition = scale(goalangle, -150, 150, 21, 1002)
self.set_servo_position(goalposition, goaltime, led) | 0.003645 |
def _prepare_polib_files(files_dict, filename, languages,
locale_root, po_files_path, header):
"""
Prepare polib file objects for reading from and writing to.
Create directories and write header if needed. For each language,
ensure there's a translation file named "filename" in the correct place.
Assumes (and creates) a directory structure:
<locale_root>/<lang>/<po_files_path>/<filename>.
"""
files_dict[filename] = {}
for lang in languages:
file_path = os.path.join(locale_root, lang, po_files_path)
if not os.path.exists(file_path):
os.makedirs(file_path)
if header is not None:
_write_header(os.path.join(file_path, filename), lang, header)
files_dict[filename][lang] = polib.pofile(
os.path.join(file_path, filename), encoding="UTF-8") | 0.001149 |
def process_mgi_note_allele_view(self, limit=None):
"""
These are the descriptive notes about the alleles.
Note that these notes have embedded HTML -
should we do anything about that?
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Assembling notes on alleles")
raw = '/'.join((self.rawdir, 'mgi_note_allele_view'))
notehash = {}
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
if line_counter == 1:
continue
(object_key, notetype, note, sequencenum) = line
# read all the notes into a hash to concatenate
if object_key not in notehash:
notehash[object_key] = {}
if notetype not in notehash[object_key]:
notehash[object_key][notetype] = []
if len(notehash[object_key][notetype]) < int(sequencenum):
for i in range(
len(notehash[object_key][notetype]),
int(sequencenum)
):
notehash[object_key][notetype].append('') # ??? I don't get it
notehash[object_key][notetype][int(sequencenum)-1] = note.strip()
# finish iteration over notes
line_counter = 0
for allele_key in notehash:
if self.test_mode is True:
if int(allele_key) not in self.test_keys.get('allele'):
continue
line_counter += 1
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
continue
for n in notehash[allele_key]:
LOG.info(
"found %d %s notes for %s",
len(notehash[allele_key]), n, allele_id)
notes = ''.join(notehash[allele_key][n])
notes += ' ['+n+']'
model.addDescription(allele_id, notes)
if not self.test_mode and limit is not None and line_counter > limit:
break
return | 0.002059 |
def get_i_text(node):
"""
Get the text for an Indicator node.
:param node: Indicator node.
:return:
"""
if node.tag != 'Indicator':
raise IOCParseError('Invalid tag: {}'.format(node.tag))
s = node.get('operator').upper()
return s | 0.006536 |
def _set_available_combinations(self):
"""
Generate all connected outputs combinations and
set the max display width while iterating.
"""
available = set()
combinations_map = {}
whitelist = None
if self.output_combinations:
whitelist = self.output_combinations.split("|")
self.max_width = 0
for output in range(len(self.layout["connected"])):
for comb in combinations(self.layout["connected"], output + 1):
for mode in ["clone", "extend"]:
string = self._get_string_and_set_width(comb, mode)
if whitelist and string not in whitelist:
continue
if len(comb) == 1:
combinations_map[string] = (comb, None)
else:
combinations_map[string] = (comb, mode)
available.add(string)
# Preserve the order in which user defined the output combinations
if whitelist:
available = reversed([comb for comb in whitelist if comb in available])
self.available_combinations = deque(available)
self.combinations_map = combinations_map | 0.0024 |
def processHierarchical(self):
"""Main process for hierarchical segmentation.
Returns
-------
est_idxs : list
List containing estimated times for each layer in the hierarchy
as np.arrays
est_labels : list
List containing estimated labels for each layer in the hierarchy
as np.arrays
"""
# Preprocess to obtain features, times, and input boundary indices
F, dur = features(self.file_struct, self.annot_beats, self.framesync)
try:
# Load and apply transform
W = load_transform(self.config["transform"])
F = W.dot(F)
# Get Segments
kmin, kmax = get_num_segs(dur)
# Run algorithm layer by layer
est_idxs = []
est_labels = []
for k in range(kmin, kmax):
S, cost = get_k_segments(F, k)
est_idxs.append(S)
est_labels.append(np.ones(len(S) - 1) * -1)
# Make sure that the first and last boundaries are included
assert est_idxs[-1][0] == 0 and \
est_idxs[-1][-1] == F.shape[1] - 1, "Layer %d does not " \
"start or end in the right frame(s)." % k
# Post process layer
est_idxs[-1], est_labels[-1] = \
self._postprocess(est_idxs[-1], est_labels[-1])
except:
# The audio file is too short, only beginning and end
logging.warning("Audio file too short! "
"Only start and end boundaries.")
est_idxs = [np.array([0, F.shape[1] - 1])]
est_labels = [np.ones(1) * -1]
return est_idxs, est_labels | 0.002247 |
def get_full_returns(self, jid, minions, timeout=None):
'''
This method starts off a watcher looking at the return data for
a specified jid; it returns all of the information for the jid
'''
# TODO: change this from ret to return... or the other way.
# Its inconsistent, we should pick one
ret = {}
# create the iterator-- since we want to get anyone in the middle
event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)
try:
data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)
except Exception as exc:
raise SaltClientError('Returner {0} could not fetch jid data. '
'Exception details: {1}'.format(
self.opts['master_job_cache'],
exc))
for minion in data:
m_data = {}
if 'return' in data[minion]:
m_data['ret'] = data[minion].get('return')
else:
m_data['ret'] = data[minion].get('ret')
if 'out' in data[minion]:
m_data['out'] = data[minion]['out']
if minion in ret:
ret[minion].update(m_data)
else:
ret[minion] = m_data
# if we have all the minion returns, lets just return
if len(set(ret).intersection(minions)) >= len(minions):
return ret
# otherwise lets use the listener we created above to get the rest
for event_ret in event_iter:
# if nothing in the event_ret, skip
if event_ret == {}:
time.sleep(0.02)
continue
for minion, m_data in six.iteritems(event_ret):
if minion in ret:
ret[minion].update(m_data)
else:
ret[minion] = m_data
# are we done yet?
if len(set(ret).intersection(minions)) >= len(minions):
return ret
# otherwise we hit the timeout, return what we have
return ret | 0.001383 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'user_id') and self.user_id is not None:
_dict['user_id'] = self.user_id
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict | 0.002688 |
def closeGlyphsOverGSUB(gsub, glyphs):
""" Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
substitutions from this initial set.
"""
subsetter = subset.Subsetter()
subsetter.glyphs = glyphs
gsub.closure_glyphs(subsetter) | 0.002463 |
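A minimal sketch of how this might be driven from fontTools, assuming a compiled font with a GSUB table (the path is a placeholder); the glyph set is grown in place.
from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")  # hypothetical path
glyphs = {"a", "f", "i"}
closeGlyphsOverGSUB(font["GSUB"], glyphs)
print(sorted(glyphs))  # now also contains glyphs reachable via GSUB substitutions, e.g. ligatures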
def validate(self):
"""
validate the RevocationReason object
"""
if not isinstance(self.revocation_code, RevocationReasonCode):
msg = "RevocationReaonCode expected"
raise TypeError(msg)
if self.revocation_message is not None:
if not isinstance(self.revocation_message, TextString):
msg = "TextString expect"
raise TypeError(msg) | 0.004587 |
def new_deploy(py_ver: PyVer, release_target: ReleaseTarget):
"""Job for deploying package to pypi"""
cache_file = f'app_{py_ver.name}.tar'
template = yaml.safe_load(f"""
machine:
image: circleci/classic:201710-02
steps:
- attach_workspace:
at: {cache_dir}
- checkout
- run:
name: Install prerequisites
command: sudo pip install awscli
- run:
name: Load docker image layer cache
command: docker load -i {cache_dir}/{cache_file}
- run:
name: Start a named container
command: docker run --name=SDK {py_ver.tag}
- run:
name: Extract the documentation
command: 'docker cp SDK:/build/built_docs ./built_docs'
- run:
name: Upload the documentation
command: >-
aws s3 sync --delete --cache-control
max-age=3600 built_docs s3://mbed-cloud-sdk-python
- run:
name: Tag and release
command: >-
docker run --env-file=scripts/templates/envvars.env
-e TWINE_REPOSITORY={release_target.twine_repo}
{py_ver.tag}
sh -c "source .venv/bin/activate && python scripts/tag_and_release.py --mode={release_target.mode}"
- run:
name: Start the release party!
command: >-
docker run --env-file=scripts/templates/envvars.env
{py_ver.tag}
sh -c "source .venv/bin/activate && python scripts/notify.py"
""")
return deploy_name(py_ver, release_target), template | 0.001269 |
def find_asts(self, ast_root, name):
'''
Finds an AST node with the given name and the entire subtree under it.
A function borrowed from scottfrazer. Thank you Scott Frazer!
:param ast_root: The WDL AST. The whole thing generally, but really
any portion that you wish to search.
:param name: The name of the subtree you're looking for, like "Task".
:return: nodes representing the AST subtrees matching the "name" given.
'''
nodes = []
if isinstance(ast_root, wdl_parser.AstList):
for node in ast_root:
nodes.extend(self.find_asts(node, name))
elif isinstance(ast_root, wdl_parser.Ast):
if ast_root.name == name:
nodes.append(ast_root)
for attr_name, attr in ast_root.attributes.items():
nodes.extend(self.find_asts(attr, name))
return nodes | 0.002125 |
def _cf_dictionary_from_tuples(tuples):
"""
Given a list of Python tuples, create an associated CFDictionary.
"""
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
) | 0.00141 |
def blast_seqs(seqs,
blast_constructor,
blast_db=None,
blast_mat_root=None,
params={},
add_seq_names=True,
out_filename=None,
WorkingDir=None,
SuppressStderr=None,
SuppressStdout=None,
input_handler=None,
HALT_EXEC=False
):
"""Blast list of sequences.
seqs: either file name or list of sequence objects or list of strings or
single multiline string containing sequences.
WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules
for data are as follows. If it's s list, treat as lines, unless
add_seq_names is true (in which case treat as list of seqs). If it's a
string, test whether it has newlines. If it doesn't have newlines, assume
it's a filename. If it does have newlines, it can't be a filename, so
assume it's a multiline string containing sequences.
If you want to skip the detection and force a specific type of input
handler, use input_handler='your_favorite_handler'.
add_seq_names: boolean. if True, sequence names are inserted in the list
of sequences. if False, it assumes seqs is a list of lines of some
proper format that the program can handle
"""
# set num keep
if blast_db:
params["-d"] = blast_db
if out_filename:
params["-o"] = out_filename
ih = input_handler or guess_input_handler(seqs, add_seq_names)
blast_app = blast_constructor(
params=params,
blast_mat_root=blast_mat_root,
InputHandler=ih,
WorkingDir=WorkingDir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout,
HALT_EXEC=HALT_EXEC)
return blast_app(seqs) | 0.006774 |
def check_courses(self, kcdms):
"""
Check whether the given courses have been selected.
@structure [bool]
:param kcdms: list of course codes
:return: a list of booleans with the same length as the course-code list; True if the course is already selected, False otherwise
"""
selected_courses = self.get_selected_courses()
selected_kcdms = {course['课程代码'] for course in selected_courses}
result = [True if kcdm in selected_kcdms else False for kcdm in kcdms]
return result | 0.004878 |
def _resolve_hostname(name):
"""Returns resolved hostname using the ssh config"""
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name | 0.002551 |
def check_match(self, name):
"""
Check if a release version matches any of the specificed patterns.
Parameters
==========
name: str
Release name
Returns
=======
bool:
True if it matches, False otherwise.
"""
return any(pattern.match(name) for pattern in self.patterns) | 0.005348 |
def delta(f, s, d=None):
"""
Create a delta for the file `f` using the signature read from `s`. The delta
will be written to `d`. If `d` is omitted, a temporary file will be used.
This function returns the delta file `d`. All parameters must be file-like
objects.
"""
if d is None:
d = tempfile.SpooledTemporaryFile(max_size=MAX_SPOOL, mode='wb+')
sig = ctypes.c_void_p()
try:
job = _librsync.rs_loadsig_begin(ctypes.byref(sig))
try:
_execute(job, s)
finally:
_librsync.rs_job_free(job)
r = _librsync.rs_build_hash_table(sig)
if r != RS_DONE:
raise LibrsyncError(r)
job = _librsync.rs_delta_begin(sig)
try:
_execute(job, f, d)
finally:
_librsync.rs_job_free(job)
finally:
_librsync.rs_free_sumset(sig)
return d | 0.002237 |
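A hedged end-to-end sketch of the librsync workflow this function belongs to; it assumes a companion signature() helper exists in the same module (only delta() is shown above).
with open("old.bin", "rb") as old, open("new.bin", "rb") as new:
    sig = signature(old)   # hypothetical companion helper producing a signature file object
    sig.seek(0)
    d = delta(new, sig)    # the function above; d is a file-like object holding the delta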
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dag's that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run | 0.002581 |
def set_default(self, section, option, default_value):
"""
Set Default value for a given (section, option)
-> called when a new (section, option) is set and no default exists
"""
section = self._check_section_option(section, option)
for sec, options in self.defaults:
if sec == section:
options[ option ] = default_value | 0.009901 |
def _load_reports(self, report_files):
"""
Args:
report_files: list[file] reports to read in
"""
contents = []
for file_handle in report_files:
# Convert to unicode, replacing unreadable chars
contents.append(
file_handle.read().decode(
'utf-8',
'replace'
)
)
return contents | 0.004515 |
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path | 0.004678 |
def execute_message_call(
laser_evm,
callee_address,
caller_address,
origin_address,
code,
data,
gas_limit,
gas_price,
value,
track_gas=False,
) -> Union[None, List[GlobalState]]:
"""Execute a message call transaction from all open states.
:param laser_evm:
:param callee_address:
:param caller_address:
:param origin_address:
:param code:
:param data:
:param gas_limit:
:param gas_price:
:param value:
:param track_gas:
:return:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
for open_world_state in open_states:
next_transaction_id = get_next_transaction_id()
transaction = MessageCallTransaction(
world_state=open_world_state,
identifier=next_transaction_id,
gas_price=gas_price,
gas_limit=gas_limit,
origin=origin_address,
code=Disassembly(code),
caller=caller_address,
callee_account=open_world_state[callee_address],
call_data=ConcreteCalldata(next_transaction_id, data),
call_value=value,
)
_setup_global_state_for_execution(laser_evm, transaction)
return laser_evm.exec(track_gas=track_gas) | 0.001451 |
def imported_member(self, node, member, name):
"""verify this is not an imported class or handle it"""
# /!\ some classes like ExtensionClass doesn't have a __module__
# attribute ! Also, this may trigger an exception on badly built module
# (see http://www.logilab.org/ticket/57299 for instance)
try:
modname = getattr(member, "__module__", None)
except TypeError:
modname = None
if modname is None:
if name in ("__new__", "__subclasshook__"):
# Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14)
# >>> print object.__new__.__module__
# None
modname = builtins.__name__
else:
attach_dummy_node(node, name, member)
return True
real_name = {"gtk": "gtk_gtk", "_io": "io"}.get(modname, modname)
if real_name != self._module.__name__:
# check if it sounds valid and then add an import node, else use a
# dummy node
try:
getattr(sys.modules[modname], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, modname, name)
return True
return False | 0.001493 |
def great_circle_Npoints(lonlat1r, lonlat2r, N):
"""
N points along the line joining lonlat1 and lonlat2
"""
ratio = np.linspace(0.0,1.0, N).reshape(-1,1)
xyz1 = lonlat2xyz(lonlat1r[0], lonlat1r[1])
xyz2 = lonlat2xyz(lonlat2r[0], lonlat2r[1])
mids = ratio * xyz2 + (1.0-ratio) * xyz1
norm = np.sqrt((mids**2).sum(axis=1))
xyzN = mids / norm.reshape(-1,1)
lonlatN = xyz2lonlat( xyzN[:,0], xyzN[:,1], xyzN[:,2])
return lonlatN | 0.018987 |
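A small usage sketch, assuming the lonlat2xyz/xyz2lonlat helpers from the same module, coordinates given in radians (as the r suffix in the argument names suggests), and that xyz2lonlat returns a (lons, lats) pair.
import numpy as np

lonlat_start = (0.0, 0.0)           # (lon, lat) in radians
lonlat_end = (np.pi / 2.0, 0.0)
lons, lats = great_circle_Npoints(lonlat_start, lonlat_end, 5)
# 5 evenly spaced points along the equator between the two endpoints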
def handle_existing_user(self, provider, user, access, info):
"Login user and redirect."
login(self.request, user)
return redirect(self.get_login_redirect(provider, user, access)) | 0.009852 |
def format_items(x):
"""Returns a succinct summaries of all items in a sequence as strings"""
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = (x[~pd.isnull(x)]
.astype('timedelta64[D]')
.astype('timedelta64[ns]'))
time_needed = x[~pd.isnull(x)] != day_part
day_needed = day_part != np.timedelta64(0, 'ns')
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted | 0.001342 |
def set_global_tracer(value):
"""Sets the global tracer.
It is an error to pass ``None``.
:param value: the :class:`Tracer` used as global instance.
:type value: :class:`Tracer`
"""
if value is None:
raise ValueError('The global Tracer tracer cannot be None')
global tracer, is_tracer_registered
tracer = value
is_tracer_registered = True | 0.002604 |
def init(*args, **kwargs):
"""Initializes the SDK and optionally integrations.
This takes the same arguments as the client constructor.
"""
global _initial_client
client = Client(*args, **kwargs)
Hub.current.bind_client(client)
rv = _InitGuard(client)
if client is not None:
_initial_client = weakref.ref(client)
return rv | 0.002725 |
def compute(self, base, *args, **kwargs):
'''
Returns the value of the discount.
@param base:float Computation base.
@return: Decimal
'''
return min(base, super(Discount, self).compute(base, *args, **kwargs)) | 0.007813 |
def get_available_positions(self):
"""Return a list of empty slot numbers
"""
available_positions = ["new"]
layout = self.context.getLayout()
used_positions = [int(slot["position"]) for slot in layout]
if used_positions:
used = [
pos for pos in range(1, max(used_positions) + 1) if
pos not in used_positions]
available_positions.extend(used)
return available_positions | 0.004167 |
def get_url(width, height, color=True):
"""
Craft the URL for a placekitten image.
By default they are in color. To retrieve a grayscale image, set
the color kwarg to False.
"""
d = dict(width=width, height=height)
return URL % d | 0.003876 |
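A tiny usage sketch; URL is assumed to be a module-level template such as 'http://placekitten.com/%(width)s/%(height)s' (only get_url is shown, and the grayscale branch mentioned in the docstring is not visible here).
print(get_url(200, 300))  # e.g. http://placekitten.com/200/300 under the assumed template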
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
analogous to dict.iteritems()
'''
for i in safe_range(self.field('ma_mask') + 1):
ep = self.field('ma_table') + i
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value) | 0.004141 |
def safe_tag(self, tag, errors='strict'):
"""URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API.
Args:
tag (string): The tag to be truncated
Returns:
(string): The truncated tag
"""
if tag is not None:
try:
# handle unicode characters and url encode tag value
tag = quote(self.s(tag, errors=errors), safe='~')[:128]
except KeyError as e:
warn = 'Failed converting tag to safetag ({})'.format(e)
self.log.warning(warn)
return tag | 0.004847 |
def get_attribute(self, attribute: str) -> 'Node':
"""Returns the node representing the given attribute's value.
Use only if is_mapping() returns true.
Args:
attribute: The name of the attribute to retrieve.
Raises:
KeyError: If the attribute does not exist.
Returns:
A node representing the value.
"""
matches = [
value_node for key_node, value_node in self.yaml_node.value
if key_node.value == attribute
]
if len(matches) != 1:
raise SeasoningError(
'Attribute not found, or found multiple times: {}'.format(
matches))
return Node(matches[0]) | 0.002717 |
def boxes_intersect(box1, box2):
"""Determines if two rectangles, each input as a tuple
(xmin, xmax, ymin, ymax), intersect."""
xmin1, xmax1, ymin1, ymax1 = box1
xmin2, xmax2, ymin2, ymax2 = box2
if interval_intersection_width(xmin1, xmax1, xmin2, xmax2) and \
interval_intersection_width(ymin1, ymax1, ymin2, ymax2):
return True
else:
return False | 0.002475 |
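A quick illustration of the (xmin, xmax, ymin, ymax) convention, assuming interval_intersection_width() from the same module behaves as its name suggests.
print(boxes_intersect((0, 1, 0, 1), (0.5, 1.5, 0.5, 1.5)))  # overlapping squares -> True
print(boxes_intersect((0, 1, 0, 1), (2, 3, 2, 3)))          # disjoint squares -> False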
def com_google_fonts_check_linegaps(ttFont):
"""Checking Vertical Metric Linegaps."""
if ttFont["hhea"].lineGap != 0:
yield WARN, Message("hhea", "hhea lineGap is not equal to 0.")
elif ttFont["OS/2"].sTypoLineGap != 0:
yield WARN, Message("OS/2", "OS/2 sTypoLineGap is not equal to 0.")
else:
yield PASS, "OS/2 sTypoLineGap and hhea lineGap are both 0." | 0.013369 |
def def_coordinator(self, year):
"""Returns the coach ID for the team's DC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the DC.
"""
try:
dc_anchor = self._year_info_pq(year, 'Defensive Coordinator')('a')
if dc_anchor:
return dc_anchor.attr['href']
except ValueError:
return None | 0.004608 |
def get_argparser(parser=None):
"""Customize a parser to get the correct options."""
parser = parser or argparse.ArgumentParser()
parser.add_argument("--host", default="0.0.0.0", help="Host listen address")
parser.add_argument("--port", "-p", default=9050, help="Listen port", type=int)
parser.add_argument(
"--debug",
"-d",
default=False,
action="store_true",
help="Enable debug mode",
)
parser.add_argument(
"--log-level",
"-l",
default="INFO",
help="Log Level, empty string to disable.",
)
parser.add_argument(
"--twisted",
default=False,
action="store_true",
help="Use twisted to server requests.",
)
parser.add_argument(
"--gunicorn",
default=False,
action="store_true",
help="Use gunicorn to server requests.",
)
parser.add_argument(
"--threads", default=None, help="Number of threads to use.", type=int
)
parser.add_argument("--disable-embedded-logging",
default=False,
action="store_true",
help="Disable embedded logging configuration")
return parser | 0.003608 |
def str_to_datetime(ts):
"""Format a string to a datetime object.
This functions supports several date formats like YYYY-MM-DD, MM-DD-YYYY
and YY-MM-DD. When the given data is None or an empty string, the function
returns None.
:param ts: string to convert
:returns: a datetime object
:raises InvalidDateError: when the given string cannot be converted into
a valid date
"""
if not ts:
return None
try:
return dateutil.parser.parse(ts).replace(tzinfo=None)
except Exception:
raise InvalidDateError(date=str(ts)) | 0.001692 |
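A short usage sketch of the parser above; dateutil handles the listed formats, and falsy input maps to None.
print(str_to_datetime("2019-03-04"))   # -> 2019-03-04 00:00:00
print(str_to_datetime(""))             # -> None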
def get_node_label(self, model):
"""
Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | 0.003155 |
def do_load(self, arg):
"""Loads a saved session variables, settings and test results to the shell."""
from os import path
import json
fullpath = path.expanduser(arg)
if path.isfile(fullpath):
with open(fullpath) as f:
data = json.load(f)
#Now, reparse the staging directories that were present in the saved session.
for stagepath in data["tests"]:
self.do_parse(stagepath)
self.args = data["args"] | 0.009634 |
def requests():
"""List all pending memberships, listed only for group admins."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_requests(current_user, eager=True).all()
return render_template(
'invenio_groups/pending.html',
memberships=memberships,
requests=True,
page=page,
per_page=per_page,
) | 0.002257 |
def start(self):
""" Start the SSH tunnels """
if self.is_alive:
self.logger.warning('Already started!')
return
self._create_tunnels()
if not self.is_active:
self._raise(BaseSSHTunnelForwarderError,
reason='Could not establish session to SSH gateway')
for _srv in self._server_list:
thread = threading.Thread(
target=self._serve_forever_wrapper,
args=(_srv, ),
name='Srv-{0}'.format(address_to_str(_srv.local_port))
)
thread.daemon = self.daemon_forward_servers
thread.start()
self._check_tunnel(_srv)
self.is_alive = any(self.tunnel_is_up.values())
if not self.is_alive:
self._raise(HandlerSSHTunnelForwarderError,
'An error occurred while opening tunnels.') | 0.002176 |
def upload(self, filename, directory=None):
"""
Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attribute:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File`
"""
filename = eval_path(filename)
if directory is None:
directory = self.downloads_directory
# First request
res1 = self._req_upload(filename, directory)
data1 = res1['data']
file_id = data1['file_id']
# Second request
res2 = self._req_file(file_id)
data2 = res2['data'][0]
data2.update(**data1)
return _instantiate_uploaded_file(self, data2) | 0.002457 |
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure | 0.003891 |
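A minimal sketch, assuming an experiment.yml next to the script containing plain key/value pairs.
config = read_file("experiment.yml")
print(config.get("name"))  # hypothetical top-level key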
def display_user(value, arg):
''' Return 'You' if value is equal to arg.
Parameters:
value should be a userprofile
arg should be another user.
Ideally, value should be a userprofile taken from an object and arg the logged-in user.
'''
if value.user == arg and arg.username != ANONYMOUS_USERNAME:
return "You"
else:
return value.user.get_full_name() | 0.004831 |
def load_and_parse(self, package_name, root_dir, relative_dirs,
resource_type, tags=None):
"""Load and parse models in a list of directories. Returns a dict
that maps unique ids onto ParsedNodes"""
extension = "[!.#~]*.sql"
if tags is None:
tags = []
if dbt.flags.STRICT_MODE:
dbt.contracts.project.ProjectList(**self.all_projects)
file_matches = dbt.clients.system.find_matching(
root_dir,
relative_dirs,
extension)
result = []
for file_match in file_matches:
file_contents = dbt.clients.system.load_file_contents(
file_match.get('absolute_path'))
parts = dbt.utils.split_path(file_match.get('relative_path', ''))
name, _ = os.path.splitext(parts[-1])
path = self.get_compiled_path(name,
file_match.get('relative_path'))
original_file_path = os.path.join(
file_match.get('searched_path'),
path)
result.append({
'name': name,
'root_path': root_dir,
'resource_type': resource_type,
'path': path,
'original_file_path': original_file_path,
'package_name': package_name,
'raw_sql': file_contents
})
return self.parse_sql_nodes(result, tags) | 0.002012 |
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400):
""" Make two panel plot to summary noise analysis with estimated flux scale """
d = pickle.load(open(mergepkl))
ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height)
fluxscale = calcfluxscale(d, imstd, flagfrac)
logger.info('Median image noise is {0:.3} Jy.'.format(fluxscale*imstd))
ncum, imnoise = plotnoisecum(noisepkl, fluxscale=fluxscale, plot_width=plot_width/2, plot_height=plot_height)
hndle = show(Row(ndist, ncum, width=plot_width, height=plot_height))
return imnoise | 0.00641 |
def start(name, call=None):
'''
Start a VM.
.. versionadded:: 2016.3.0
name
The name of the VM to start.
CLI Example:
.. code-block:: bash
salt-cloud -a start my-vm
'''
if call != 'action':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
log.info('Starting node %s', name)
return vm_action(name, kwargs={'action': 'resume'}, call=call) | 0.002165 |
def find_module(name, path=None):
"""imp.find_module variant that only return path of module.
The `imp.find_module` returns a filehandle that we are not interested in.
Also we ignore any bytecode files that `imp.find_module` finds.
Parameters
----------
name : str
name of module to locate
path : list of str
list of paths to search for `name`. If path=None then search sys.path
Returns
-------
filename : str
Return full path of module or None if module is missing or does not have
.py or .pyw extension
"""
if name is None:
return None
try:
file, filename, _ = imp.find_module(name, path)
except ImportError:
return None
if file is None:
return filename
else:
file.close()
if os.path.splitext(filename)[1] in [".py", ".pyw"]:
return filename
else:
return None | 0.003236 |
def str_lstrip(x, to_strip=None):
"""Remove leading characters from a string sample.
:param str to_strip: The string to be removed
:returns: an expression containing the modified string column.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.lstrip(to_strip='very ')
Expression = str_lstrip(text, to_strip='very ')
Length: 5 dtype: str (expression)
---------------------------------
0 Something
1 pretty
2 is coming
3 our
4 way.
"""
# in c++ we give empty string the same meaning as None
sl = _to_string_sequence(x).lstrip('' if to_strip is None else to_strip) if to_strip != '' else x
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | 0.003021 |
def add_ret_hash_memo(self, memo_return):
"""Set the memo for the transaction to a new :class:`RetHashMemo
<stellar_base.memo.RetHashMemo>`.
:param bytes memo_return: A 32 byte hash or hex encoded string intended to be interpreted as
the hash of the transaction the sender is refunding.
:type memo_return: bytes, str
:return: This builder instance.
"""
memo_return = memo.RetHashMemo(memo_return)
return self.add_memo(memo_return) | 0.005906 |
def pad(img, padding, fill=0, padding_mode='constant'):
r"""Pad the given PIL Image on all sides with specified padding mode and fill value.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not isinstance(padding, (numbers.Number, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Padding mode should be either constant, edge, reflect or symmetric'
if padding_mode == 'constant':
if img.mode == 'P':
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, fill=fill)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, fill=fill)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, Sequence) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
if img.mode == 'P':
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
return Image.fromarray(img) | 0.003873 |
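A hedged usage sketch of the functional padding helper above (a torchvision-style API): a 2-tuple pads left/right and top/bottom respectively.
from PIL import Image

img = Image.new("RGB", (32, 32))
padded = pad(img, padding=(2, 4), fill=0, padding_mode="constant")
print(padded.size)  # (36, 40): 2 px added on each side horizontally, 4 px vertically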
def do_bestfit(self):
"""
Do bestfit
"""
self.check_important_variables()
x = np.array(self.args["x"])
y = np.array(self.args["y"])
p = self.args.get("params", np.ones(self.args["num_vars"]))
self.fit_args, self.cov = opt.curve_fit(self.args["func"], x, y, p)
return self.fit_args | 0.005682 |
def transform(self, mode=None):
'''
Set the current transform mode.
:param mode: CENTER or CORNER'''
if mode:
self._canvas.mode = mode
return self._canvas.mode | 0.009434 |
def update_issue_link_type(self, issue_link_type_id, data):
"""
Update the specified issue link type.
:param issue_link_type_id:
:param data: {
"name": "Duplicate",
"inward": "Duplicated by",
"outward": "Duplicates"
}
:return:
"""
url = 'rest/api/2/issueLinkType/{issueLinkTypeId}'.format(issueLinkTypeId=issue_link_type_id)
return self.put(url, data=data) | 0.005814 |
def ccnot_circuit(qubits: Qubits) -> Circuit:
"""Standard decomposition of CCNOT (Toffoli) gate into
six CNOT gates (Plus Hadamard and T gates.) [Nielsen2000]_
.. [Nielsen2000]
M. A. Nielsen and I. L. Chuang, Quantum Computation and Quantum
Information, Cambridge University Press (2000).
"""
if len(qubits) != 3:
raise ValueError('Expected 3 qubits')
q0, q1, q2 = qubits
circ = Circuit()
circ += H(q2)
circ += CNOT(q1, q2)
circ += T(q2).H
circ += CNOT(q0, q2)
circ += T(q2)
circ += CNOT(q1, q2)
circ += T(q2).H
circ += CNOT(q0, q2)
circ += T(q1)
circ += T(q2)
circ += H(q2)
circ += CNOT(q0, q1)
circ += T(q0)
circ += T(q1).H
circ += CNOT(q0, q1)
return circ | 0.001285 |
def isargument(self, node):
""" checks whether node aliases to a parameter."""
try:
node_id, _ = self.node_to_id(node)
return (node_id in self.name_to_nodes and
any([isinstance(n, ast.Name) and
isinstance(n.ctx, ast.Param)
for n in self.name_to_nodes[node_id]]))
except UnboundableRValue:
return False | 0.00464 |
def insert_entity(self, entity):
'''
Adds an insert entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_entity` for more
information on inserts.
The operation will not be executed until the batch is committed.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`azure.storage.table.models.Entity`
'''
request = _insert_entity(entity)
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) | 0.010606 |
def run_job(job_ini, log_level='info', log_file=None, exports='',
username=getpass.getuser(), **kw):
"""
Run a job using the specified config file and other options.
:param str job_ini:
Path to calculation config (INI-style) files.
:param str log_level:
'debug', 'info', 'warn', 'error', or 'critical'
:param str log_file:
Path to log file.
:param exports:
A comma-separated string of export types requested by the user.
:param username:
Name of the user running the job
:param kw:
Extra parameters like hazard_calculation_id and calculation_mode
"""
job_id = logs.init('job', getattr(logging, log_level.upper()))
with logs.handle(job_id, log_level, log_file):
job_ini = os.path.abspath(job_ini)
oqparam = eng.job_from_file(job_ini, job_id, username, **kw)
kw['username'] = username
eng.run_calc(job_id, oqparam, exports, **kw)
for line in logs.dbcmd('list_outputs', job_id, False):
safeprint(line)
return job_id | 0.000932 |
def _run_expiration(self, conn):
"""Return any items that have expired."""
# The logic here is sufficiently complicated, and we need
# enough random keys (Redis documentation strongly encourages
# not constructing key names in scripts) that we'll need to
# do this in multiple steps. This means that, when we do
# go in and actually expire things, we need to first check
# that they're still running.
# Get, and clear out, the list of expiring items
now = time.time()
script = conn.register_script("""
local result = redis.call("zrangebyscore", KEYS[1], 0, ARGV[1])
redis.call("zremrangebyscore", KEYS[1], 0, ARGV[1])
return result
""")
expiring = script(keys=[self._key_expiration()], args=[time.time()])
# Manually expire each item one by one
script = conn.register_script("""
-- item may have fallen out of the worker list, if someone finished
-- at just the very last possible moment (phew!)
local wworker = redis.call("hget", KEYS[3], "i" .. ARGV[1])
if not wworker then return end
-- we want to return item, plus everything it's reserved
local to_return = redis.call("smembers", KEYS[4])
to_return[#to_return + 1] = ARGV[1]
for i = 1, #to_return do
local pri = redis.call("hget", KEYS[2], to_return[i])
redis.call("zadd", KEYS[1], pri, to_return[i])
end
-- already removed from expiration list
-- remove from worker list too
redis.call("hdel", KEYS[3], "i" .. ARGV[1])
redis.call("hdel", KEYS[3], wworker)
""")
for item in expiring:
script(keys=[self._key_available(), self._key_priorities(),
self._key_workers(), self._key_reservations(item)],
args=[item]) | 0.001573 |
def get_callable_method_dict(obj):
"""Returns a dictionary of callable methods of object `obj`.
@param obj: ZOS API Python COM object
@return: a dictionary of callable methods
Notes:
the function only returns the callable attributes that are listed by the dir()
function. Properties are not returned.
"""
methodDict = {}
for methodStr in dir(obj):
method = getattr(obj, methodStr, 'none')
if callable(method) and not methodStr.startswith('_'):
methodDict[methodStr] = method
return methodDict | 0.007092 |
async def confirmbalance(self, *args, **kwargs):
""" Confirm balance after trading
Accepts:
- message (signed dictionary):
- "txid" - str
- "coinid" - str
- "amount" - int
Returns:
- "address" - str
- "coinid" - str
- "amount" - int
- "uid" - int
- "unconfirmed" - int (0 by default)
- "deposit" - int (0 by default)
Verified: True
"""
# Get data from request
if kwargs.get("message"):
kwargs = json.loads(kwargs.get("message", "{}"))
txid = kwargs.get("txid")
coinid = kwargs.get("coinid")
buyer_address = kwargs.get("buyer_address")
cid = kwargs.get("cid")
address = kwargs.get("buyer_address")
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fields exists
if not all([coinid, cid, buyer_address, txid]):
return {"error":400, "reason": "Confirm balance. Missed required fields"}
if not coinid in settings.bridges.keys():
return await self.error_400("Confirm balance. Invalid coinid: %s" % coinid)
# Get offers price
self.account.blockchain.setendpoint(settings.bridges[coinid])
offer = await self.account.blockchain.getoffer(cid=cid,
buyer_address=buyer_address)
# Get offers price for updating balance
amount = int(offer["price"])
coinid = "PUT"
# Get sellers account
history_database = self.client[settings.HISTORY]
history_collection = history_database[coinid]
history = await history_collection.find_one({"txid":txid})
try:
account = await self.account.getaccountdata(public_key=history["public_key"])
except:
return await self.error_404("Confirm balance. Not found current deal.")
# Connect to balance database
database = self.client[self.collection]
balance_collection = database[coinid]
# Try to update balance if exists
balance = await balance_collection.find_one({"uid":account["id"]})
# Decrement unconfirmed
submitted = int(balance["amount_frozen"]) - int(amount)
if submitted < 0:
return await self.error_400("Not enough frozen amount.")
decremented = await balance_collection.find_one_and_update(
{"uid":account["id"]},
{"$set":{"amount_frozen": str(submitted)}})
difference = int(balance["amount_active"]) + int(amount)
updated = await balance_collection.find_one_and_update(
{"uid":account["id"]},
{"$set":{"amount_active":str(difference)}})
if not updated:
return {"error":404,
"reason":"Confirm balance. Not found current transaction id"}
# Delete transaction id field
await history_collection.find_one_and_update({"txid":txid},
{"$unset":{"txid":1}})
if int(account["level"]) == 2:
await self.account.updatelevel(**{"id":account["id"], "level":3})
return {i:updated[i] for i in updated if i != "_id" and i != "txid"} | 0.040975 |
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
"""Given an xarray data object and label based indexers, return a mapping
of equivalent location based indexers. Also return a mapping of updated
pandas index objects (in case of multi-index level drop).
"""
if method is not None and not isinstance(method, str):
raise TypeError('``method`` must be a string')
pos_indexers = {}
new_indexes = {}
dim_indexers = get_dim_indexers(data_obj, indexers)
for dim, label in dim_indexers.items():
try:
index = data_obj.indexes[dim]
except KeyError:
# no index for this dimension: reuse the provided labels
if method is not None or tolerance is not None:
raise ValueError('cannot supply ``method`` or ``tolerance`` '
'when the indexed dimension does not have '
'an associated coordinate.')
pos_indexers[dim] = label
else:
idxr, new_idx = convert_label_indexer(index, label,
dim, method, tolerance)
pos_indexers[dim] = idxr
if new_idx is not None:
new_indexes[dim] = new_idx
return pos_indexers, new_indexes | 0.000754 |
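A hedged usage sketch (assumes this function's xarray siblings, get_dim_indexers and convert_label_indexer, are importable alongside it; the DataArray below is illustrative):

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(4), dims='x', coords={'x': [10, 20, 30, 40]})
pos_indexers, new_indexes = remap_label_indexers(da, {'x': 30})
print(pos_indexers)   # {'x': 2} -- the label 30 maps to positional index 2
print(new_indexes)    # {} -- no multi-index levels were dropped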
def produceResource(self, request, segments, webViewer):
"""
Produce a resource that traverses site-wide content, passing down the
given webViewer. This delegates to the site store's
L{IMantissaSite} adapter, to avoid a conflict with the
L{ISiteRootPlugin} interface.
This method will typically be given an L{_AuthenticatedWebViewer}, which
can build an appropriate resource for an authenticated shell page,
whereas the site store's L{IWebViewer} adapter would show an anonymous
page.
The result of this method will be a L{_CustomizingResource}, to provide
support for resources which may provide L{ICustomizable}. Note that
Mantissa itself no longer implements L{ICustomizable} anywhere, though.
All application code should phase out inspecting the string passed to
ICustomizable in favor of getting more structured information from the
L{IWebViewer}. However, it has not been deprecated yet because
the interface which allows application code to easily access the
L{IWebViewer} from view code has not yet been developed; it is
forthcoming.
See ticket #2707 for progress on this.
"""
mantissaSite = self.publicSiteRoot
if mantissaSite is not None:
for resource, domain in userbase.getAccountNames(self.store):
username = '%s@%s' % (resource, domain)
break
else:
username = None
bottomResource, newSegments = mantissaSite.siteProduceResource(
request, segments, webViewer)
return (_CustomizingResource(bottomResource, username), newSegments)
return None | 0.002279 |
def gifs_translate_get(self, api_key, s, **kwargs):
"""
Translate Endpoint
The translate API draws on search, but uses the Giphy `special sauce` to handle translating from one vocabulary to another. In this case, words and phrases to GIFs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.gifs_translate_get(api_key, s, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param str s: Search term. (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.gifs_translate_get_with_http_info(api_key, s, **kwargs)
else:
(data) = self.gifs_translate_get_with_http_info(api_key, s, **kwargs)
return data | 0.003925 |
def add_group_entity(self, persons_plural, persons_ids, entity, instances_json):
"""
Add all instances of one of the model's entities as described in ``instances_json``.
"""
check_type(instances_json, dict, [entity.plural])
entity_ids = list(map(str, instances_json.keys()))
self.entity_ids[entity.plural] = entity_ids
self.entity_counts[entity.plural] = len(entity_ids)
persons_count = len(persons_ids)
persons_to_allocate = set(persons_ids)
self.memberships[entity.plural] = np.empty(persons_count, dtype = np.int32)
self.roles[entity.plural] = np.empty(persons_count, dtype = object)
for instance_id, instance_object in instances_json.items():
check_type(instance_object, dict, [entity.plural, instance_id])
variables_json = instance_object.copy() # Don't mutate function input
roles_json = {
role.plural or role.key: transform_to_strict_syntax(variables_json.pop(role.plural or role.key, []))
for role in entity.roles
}
for role_id, role_definition in roles_json.items():
check_type(role_definition, list, [entity.plural, instance_id, role_id])
for index, person_id in enumerate(role_definition):
entity_plural = entity.plural
self.check_persons_to_allocate(persons_plural, entity_plural,
persons_ids,
person_id, instance_id, role_id,
persons_to_allocate, index)
persons_to_allocate.discard(person_id)
entity_index = entity_ids.index(instance_id)
role_by_plural = {role.plural or role.key: role for role in entity.roles}
for role_plural, persons_with_role in roles_json.items():
role = role_by_plural[role_plural]
if role.max is not None and len(persons_with_role) > role.max:
raise SituationParsingError([entity.plural, instance_id, role_plural], f"There can be at most {role.max} {role_plural} in a {entity.key}. {len(persons_with_role)} were declared in '{instance_id}'.")
for index_within_role, person_id in enumerate(persons_with_role):
person_index = persons_ids.index(person_id)
self.memberships[entity.plural][person_index] = entity_index
person_role = role.subroles[index_within_role] if role.subroles else role
self.roles[entity.plural][person_index] = person_role
self.init_variable_values(entity, variables_json, instance_id)
if persons_to_allocate:
entity_ids = entity_ids + list(persons_to_allocate)
for person_id in persons_to_allocate:
person_index = persons_ids.index(person_id)
self.memberships[entity.plural][person_index] = entity_ids.index(person_id)
self.roles[entity.plural][person_index] = entity.flattened_roles[0]
# Adjust previously computed ids and counts
self.entity_ids[entity.plural] = entity_ids
self.entity_counts[entity.plural] = len(entity_ids)
# Convert back to Python array
self.roles[entity.plural] = self.roles[entity.plural].tolist()
self.memberships[entity.plural] = self.memberships[entity.plural].tolist() | 0.006016 |
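A sketch of the instances_json shape this parser expects (the entity and role names are hypothetical; any key that is not a role plural is treated as a variable and handed to init_variable_values):

instances_json = {
    "household_1": {
        "parents": ["alice"],            # role plural -> list of person ids
        "children": ["bob", "carol"],
        "rent": {"2024-01": 1200},       # remaining keys are variables
    },
}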
def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
    local(flo('find {basedir}/utlz {basedir}/tests ') +
          r'\( -name \*.pyc -o -name \*.pyo -o -name __pycache__ '
          r'-o -name \*.so -o -name \*.o -o -name \*.c \) '
          '-prune '
          '-exec rm -rf {} +')
if deltox:
        print(cyan('\ndelete tox virtual environments'))
local(flo('cd {basedir} && rm -rf .tox/')) | 0.007299 |
def get_variables(self, *args, **kwargs):
"""Provide a warning that get_variables on Sequential always returns ()."""
tf.logging.warning(
"Calling Sequential.get_variables, which will always return an empty "
"tuple. get_variables() can only return variables created directly by "
"a Module, or created by submodules directly created inside the "
"Module. Sequential is constructed from already constructed submodules "
"and so this will always be empty. See the documentation for more "
"details, but tl;dr if you need to connect some modules sequentially "
"and call get_variables on the result, writing a simple custom module "
"is the simplest way. Another option is to call get_all_variables().")
return super(Sequential, self).get_variables(*args, **kwargs) | 0.002384 |
def update_utxoset(self, transaction):
"""Update the UTXO set given ``transaction``. That is, remove
the outputs that the given ``transaction`` spends, and add the
outputs that the given ``transaction`` creates.
Args:
transaction (:obj:`~bigchaindb.models.Transaction`): A new
transaction incoming into the system for which the UTXO
set needs to be updated.
"""
        spent_outputs = list(transaction.spent_outputs)
if spent_outputs:
self.delete_unspent_outputs(*spent_outputs)
self.store_unspent_outputs(
*[utxo._asdict() for utxo in transaction.unspent_outputs]
) | 0.002663 |
def _get_rabbitmq_plugin():
'''
    Returns the rabbitmq-plugins command path if we're running an OS that
doesn't put it in the standard /usr/bin or /usr/local/bin
This works by taking the rabbitmq-server version and looking for where it
seems to be hidden in /usr/lib.
'''
global RABBITMQ_PLUGINS
if RABBITMQ_PLUGINS is None:
version = __salt__['pkg.version']('rabbitmq-server').split('-')[0]
RABBITMQ_PLUGINS = ('/usr/lib/rabbitmq/lib/rabbitmq_server-{0}'
'/sbin/rabbitmq-plugins').format(version)
return RABBITMQ_PLUGINS | 0.001667 |
def get_version_by_value(context, value):
"""
  Get the latest version whose value matches the provided value (e.g. an ami-id)
Args:
context: a populated EFVersionContext object
value: the value of the version to look for
"""
versions = get_versions(context)
for version in versions:
if version.value == value:
return version
fail("Didn't find a matching version for: "
"{}:{} in env/service: {}/{}".format(
context.key, value,
context.env, context.service_name)) | 0.012072 |
def show_report(self):
"""Show report."""
self.action_show_report.setEnabled(False)
self.action_show_log.setEnabled(True)
self.load_html_file(self.report_path) | 0.010471 |
def get_info(self, account, params={}):
"""
Gets account info.
@param account: account to get info for
@param params: parameters to retrieve
@return: AccountInfo
"""
res = self.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.GetInfoRequest,
params)
return res | 0.005319 |
def store_json(self, filename, dict_to_store):
"""Store json files."""
filename = os.path.join(
self.data_dir,
filename + '.data'
)
fileops.dump_dict_to_file(
dict_to_store,
filename
) | 0.007353 |
def numberOfConnectedProximalSynapses(self, cells=None):
"""
Returns the number of proximal connected synapses on these cells.
Parameters:
----------------------------
@param cells (iterable)
Indices of the cells. If None return count for all cells.
"""
if cells is None:
cells = xrange(self.numberOfCells())
return _countWhereGreaterEqualInRows(self.proximalPermanences, cells,
self.connectedPermanenceProximal) | 0.003953 |
def build_api(packages, input, output, sanitizer, excluded_modules=None):
"""
Builds the Sphinx documentation API.
:param packages: Packages to include in the API.
:type packages: list
:param input: Input modules directory.
:type input: unicode
:param output: Output reStructuredText files directory.
:type output: unicode
:param sanitizer: Sanitizer python module.
:type sanitizer: unicode
:param excluded_modules: Excluded modules.
:type excluded_modules: list
:return: Definition success.
:rtype: bool
"""
LOGGER.info("{0} | Building Sphinx documentation API!".format(build_api.__name__))
sanitizer = import_sanitizer(sanitizer)
if os.path.exists(input):
shutil.rmtree(input)
os.makedirs(input)
excluded_modules = [] if excluded_modules is None else excluded_modules
packages_modules = {"apiModules": [],
"testsModules": []}
for package in packages:
package = __import__(package)
path = foundations.common.get_first_item(package.__path__)
package_directory = os.path.dirname(path)
for file in sorted(
list(foundations.walkers.files_walker(package_directory, filters_in=("{0}.*\.ui$".format(path),)))):
LOGGER.info("{0} | Ui file: '{1}'".format(build_api.__name__, file))
target_directory = os.path.dirname(file).replace(package_directory, "")
directory = "{0}{1}".format(input, target_directory)
if not foundations.common.path_exists(directory):
os.makedirs(directory)
source = os.path.join(directory, os.path.basename(file))
shutil.copyfile(file, source)
modules = []
for file in sorted(
list(foundations.walkers.files_walker(package_directory, filters_in=("{0}.*\.py$".format(path),),
filters_out=excluded_modules))):
LOGGER.info("{0} | Python file: '{1}'".format(build_api.__name__, file))
module = "{0}.{1}".format((".".join(os.path.dirname(file).replace(package_directory, "").split("/"))),
foundations.strings.get_splitext_basename(file)).strip(".")
LOGGER.info("{0} | Module name: '{1}'".format(build_api.__name__, module))
directory = os.path.dirname(os.path.join(input, module.replace(".", "/")))
if not foundations.common.path_exists(directory):
os.makedirs(directory)
source = os.path.join(directory, os.path.basename(file))
shutil.copyfile(file, source)
sanitizer.bleach(source)
if "__init__.py" in file:
continue
rst_file_path = "{0}{1}".format(module, FILES_EXTENSION)
LOGGER.info("{0} | Building API file: '{1}'".format(build_api.__name__, rst_file_path))
rst_file = File(os.path.join(output, rst_file_path))
header = ["_`{0}`\n".format(module),
"==={0}\n".format("=" * len(module)),
"\n",
".. automodule:: {0}\n".format(module),
"\n"]
rst_file.content.extend(header)
functions = OrderedDict()
classes = OrderedDict()
module_attributes = OrderedDict()
for member, object in module_browser._readmodule(module, [source, ]).iteritems():
if object.__class__ == module_browser.Function:
if not member.startswith("_"):
functions[member] = [".. autofunction:: {0}\n".format(member)]
elif object.__class__ == module_browser.Class:
classes[member] = [".. autoclass:: {0}\n".format(member),
" :show-inheritance:\n",
" :members:\n"]
elif object.__class__ == module_browser.Global:
if not member.startswith("_"):
module_attributes[member] = [".. attribute:: {0}.{1}\n".format(module, member)]
module_attributes and rst_file.content.append("Module Attributes\n-----------------\n\n")
for module_attribute in module_attributes.itervalues():
rst_file.content.extend(module_attribute)
rst_file.content.append("\n")
functions and rst_file.content.append("Functions\n---------\n\n")
for function in functions.itervalues():
rst_file.content.extend(function)
rst_file.content.append("\n")
classes and rst_file.content.append("Classes\n-------\n\n")
for class_ in classes.itervalues():
rst_file.content.extend(class_)
rst_file.content.append("\n")
rst_file.write()
modules.append(module)
        packages_modules["apiModules"].extend([module for module in modules if "tests" not in module])
packages_modules["testsModules"].extend([module for module in modules if "tests" in module])
api_file = File("{0}{1}".format(output, FILES_EXTENSION))
api_file.content.extend(TOCTREE_TEMPLATE_BEGIN)
for module in packages_modules["apiModules"]:
api_file.content.append(" {0} <{1}>\n".format(module, "api/{0}".format(module)))
for module in packages_modules["testsModules"]:
api_file.content.append(" {0} <{1}>\n".format(module, "api/{0}".format(module)))
api_file.content.extend(TOCTREE_TEMPLATE_END)
api_file.write()
return True | 0.004257 |
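A hedged invocation sketch for build_api (the package name and paths are placeholders, not values from the original project):

build_api(packages=["foundations"],
          input="docs/sphinx/source/resources",
          output="docs/sphinx/source/api",
          sanitizer="utilities/sanitizer.py")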
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds) | 0.013889 |
def refresh(self):
""" refreshes a service """
params = {"f": "json"}
uURL = self._url + "/refresh"
res = self._get(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self.__init()
return res | 0.012723 |
def _parse_pool_options(options):
"""Parse connection pool options."""
max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE)
min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE)
max_idle_time_ms = options.get('maxidletimems', common.MAX_IDLE_TIME_MS)
if max_pool_size is not None and min_pool_size > max_pool_size:
raise ValueError("minPoolSize must be smaller or equal to maxPoolSize")
connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT)
socket_keepalive = options.get('socketkeepalive', True)
socket_timeout = options.get('sockettimeoutms')
wait_queue_timeout = options.get('waitqueuetimeoutms')
wait_queue_multiple = options.get('waitqueuemultiple')
event_listeners = options.get('event_listeners')
appname = options.get('appname')
ssl_context, ssl_match_hostname = _parse_ssl_options(options)
return PoolOptions(max_pool_size,
min_pool_size,
max_idle_time_ms,
connect_timeout, socket_timeout,
wait_queue_timeout, wait_queue_multiple,
ssl_context, ssl_match_hostname, socket_keepalive,
_EventListeners(event_listeners),
appname) | 0.000769 |
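A hedged call sketch (assumes pymongo's neighbouring helpers -- _parse_ssl_options, common, PoolOptions and _EventListeners -- are importable next to this function; the keys are the already validated, lower-cased URI option names):

opts = {"maxpoolsize": 50, "connecttimeoutms": 20.0, "appname": "reporting-job"}
pool_options = _parse_pool_options(opts)
print(pool_options.max_pool_size)   # 50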
def plot_target(target, ax):
    """Add the target to the plot."""
ax.scatter(target[0], target[1], target[2], c="red", s=80) | 0.007937 |
def _install_hiero(use_threaded_wrapper):
    """Helper function for The Foundry Hiero support"""
import hiero
import nuke
if "--hiero" not in nuke.rawArgs:
raise ImportError
def threaded_wrapper(func, *args, **kwargs):
return hiero.core.executeInMainThreadWithResult(
func, args, kwargs)
_common_setup("Hiero", threaded_wrapper, use_threaded_wrapper) | 0.002494 |
def t_QUOTED_STRING(self, t):
r'\"[^\"]*\"'
t.lexer.lineno += len(re.findall(r'\r\n|\n|\r', t.value))
return t | 0.014925 |
def constraint_matches(self, c, m):
"""
Return dict noting the substitution values (or False for no match)
"""
if isinstance(m, tuple):
d = {}
if isinstance(c, Operator) and c._op_name == m[0]:
for c1, m1 in zip(c._args, m[1:]):
r = self.constraint_matches(c1, m1)
if r is False:
return r
d.update(r)
return d
return False
return m.match(c) | 0.003759 |
def register(self, path, help_text=None, help_context=None):
"""
Registers email template.
Example usage:
email_templates.register('hello_template.html', help_text=u'Hello template',
help_context={'username': u'Name of user in hello expression'})
        :param path: Template file path. It will become the immutable registry lookup key.
:param help_text: Help text to describe template in admin site
        :param help_context: Dictionary mapping possible context keys to a description of their content
        `help_context` item values may be strings or tuples of two strings. If strings, then the email template preview
        will use variable names to fill the context; otherwise the second tuple element will become the example value.
If an email template is already registered, this will raise AlreadyRegistered.
"""
if path in self._registry:
raise AlreadyRegistered('The template %s is already registered' % path)
self._registry[path] = RegistrationItem(path, help_text, help_context)
logger.debug("Registered email template %s", path) | 0.007799 |
def santalucia98_corrections(seq, pars_error):
'''Sum corrections for SantaLucia '98 method (unified parameters).
:param seq: sequence for which to calculate corrections.
:type seq: str
:param pars_error: dictionary of error corrections
:type pars_error: dict
:returns: Corrected delta_H and delta_S parameters
:rtype: list of floats
'''
deltas_corr = [0, 0]
first = str(seq)[0]
last = str(seq)[-1]
start_gc = first == 'G' or first == 'C'
start_at = first == 'A' or first == 'T'
end_gc = last == 'G' or last == 'C'
end_at = last == 'A' or last == 'T'
init_gc = start_gc + end_gc
init_at = start_at + end_at
symmetric = seq == seq.reverse_complement()
for i, delta in enumerate(['delta_h', 'delta_s']):
deltas_corr[i] += init_gc * pars_error[delta]['initGC']
deltas_corr[i] += init_at * pars_error[delta]['initAT']
if symmetric:
deltas_corr[i] += pars_error[delta]['symmetry']
return deltas_corr | 0.000985 |
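A sketch of the pars_error layout this helper reads (the numbers below are placeholders, not authoritative SantaLucia '98 parameters; seq must expose a reverse_complement() method as in the library's sequence type):

pars_error = {
    'delta_h': {'initGC': 0.1, 'initAT': 2.3, 'symmetry': 0.0},
    'delta_s': {'initGC': -2.8, 'initAT': 4.1, 'symmetry': -1.4},
}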
def merge(self, resolvable, packages, parent=None):
"""Add a resolvable and its resolved packages."""
self.__tuples.append(_ResolvedPackages(resolvable, OrderedSet(packages),
parent, resolvable.is_constraint))
self._check() | 0.003597 |
def condensedDistance(dupes):
'''
Convert the pairwise list of distances in dupes to "condensed
distance matrix" required by the hierarchical clustering
algorithms. Also return a dictionary that maps the distance matrix
to the record_ids.
    The formula for an index of the condensed matrix is
    index = {N choose 2} - {N-row choose 2} + (col - row - 1)
          = matrix_length - row_step + col - row - 1
    where matrix_length = N*(N-1)/2, row_step = (N-row)*(N-row-1)/2, and
    (row, col) is the index of an uncondensed square N x N distance matrix.
See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
'''
candidate_set = numpy.unique(dupes['pairs'])
i_to_id = dict(enumerate(candidate_set))
ids = candidate_set.searchsorted(dupes['pairs'])
row = ids[:, 0]
col = ids[:, 1]
N = len(candidate_set)
matrix_length = N * (N - 1) / 2
row_step = (N - row) * (N - row - 1) / 2
index = matrix_length - row_step + col - row - 1
condensed_distances = numpy.ones(int(matrix_length), 'f4')
condensed_distances[index.astype(int)] = 1 - dupes['score']
return i_to_id, condensed_distances, N | 0.001631 |
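A minimal usage sketch (hypothetical scored pairs; the condensed vector can be fed to scipy.cluster.hierarchy.linkage):

import numpy

dupes = numpy.array([(('a', 'b'), 0.9), (('b', 'c'), 0.6)],
                    dtype=[('pairs', 'U1', 2), ('score', 'f4')])
i_to_id, condensed, N = condensedDistance(dupes)
print(N, len(condensed))   # 3 records -> 3 pairwise condensed distances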
def _print_task_data(self, task):
"""Pretty-prints task data.
Args:
task: Task dict generated by Turbinia.
"""
print(' {0:s} ({1:s})'.format(task['name'], task['id']))
paths = task.get('saved_paths', [])
if not paths:
return
for path in paths:
if path.endswith('worker-log.txt'):
continue
if path.endswith('{0:s}.log'.format(task.get('id'))):
continue
if path.startswith('/'):
continue
print(' ' + path) | 0.012146 |
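A sketch of the task dict shape this printer expects (hypothetical values; paths ending in worker-log.txt or '<task id>.log', or starting with '/', are skipped):

task = {
    'name': 'PlasoTask',
    'id': 'abc123',
    'saved_paths': ['gs://bucket/abc123/timeline.plaso', 'local/worker-log.txt'],
}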
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
"""
    Analyze the void space in the input structure using Voronoi decomposition.
    Calls Zeo++ for the Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
    Returns:
        free_sphere_params (dict): the largest included sphere diameter
        ('inc_sph_max_dia'), the largest free sphere diameter
        ('free_sph_max_dia') and the largest included sphere diameter along
        the free sphere path ('inc_sph_along_free_sph_path_max_dia'), as
        reported by Zeo++.
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
out_file = "temp.res"
atmnet.calculate_free_sphere_parameters(out_file)
if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
with open(out_file, "rt") as fp:
output = fp.readline()
else:
output = ""
fields = [val.strip() for val in output.split()][1:4]
if len(fields) == 3:
fields = [float(field) for field in fields]
free_sphere_params = {'inc_sph_max_dia': fields[0],
'free_sph_max_dia': fields[1],
'inc_sph_along_free_sph_path_max_dia': fields[2]}
return free_sphere_params | 0.000475 |