| text (string, lengths 78–104k) | score (float64, 0–0.18) |
|---|---|
def _ephem_convert_to_seconds_and_microseconds(date):
# utility from unreleased PyEphem 3.6.7.1
"""Converts a PyEphem date into seconds"""
microseconds = int(round(24 * 60 * 60 * 1000000 * date))
seconds, microseconds = divmod(microseconds, 1000000)
    seconds -= 2209032000  # offset from the PyEphem epoch (1899-12-31 12:00 UT) to the Unix epoch (1970-01-01)
return seconds, microseconds | 0.002688 |
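A minimal usage sketch for the helper above (added for illustration; not part of the dataset entry). PyEphem represents dates as floating-point days counted from 1899-12-31 12:00 UT, so a date value of 0.5 lands on 1900-01-01 00:00 UT:

seconds, microseconds = _ephem_convert_to_seconds_and_microseconds(0.5)
# 0.5 day = 43200 s after the PyEphem epoch; subtracting the 1900/1970 offset
# yields the Unix timestamp of 1900-01-01 00:00:00 UTC.
assert (seconds, microseconds) == (-2208988800, 0)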
def unsurt(surt):
"""
# Simple surt
>>> unsurt('com,example)/')
'example.com/'
# Broken surt
>>> unsurt('com,example)')
'com,example)'
# Long surt
>>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\
index.html?a=b?c=)/')
'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/'
"""
try:
index = surt.index(')/')
parts = surt[0:index].split(',')
parts.reverse()
host = '.'.join(parts)
host += surt[index + 1:]
return host
except ValueError:
# May not be a valid surt
return surt | 0.001585 |
def beamcenterx(self) -> ErrorValue:
"""X (column) coordinate of the beam center, pixel units, 0-based."""
try:
return ErrorValue(self._data['geometry']['beamposy'],
self._data['geometry']['beamposy.err'])
except KeyError:
return ErrorValue(self._data['geometry']['beamposy'],
0.0) | 0.005141 |
def merge_stylesheets(Class, fn, *cssfns):
"""merge the given CSS files, in order, into a single stylesheet. First listed takes priority.
"""
stylesheet = Class(fn=fn)
for cssfn in cssfns:
css = Class(fn=cssfn)
for sel in sorted(css.styles.keys()):
if sel not in stylesheet.styles:
stylesheet.styles[sel] = css.styles[sel]
else:
for prop in [prop for prop in css.styles[sel] if prop not in stylesheet.styles[sel]]:
stylesheet.styles[sel][prop] = css.styles[sel][prop]
return stylesheet | 0.007599 |
def get_instructions(self, cm, size, insn, idx):
"""
:param cm: a ClassManager object
:type cm: :class:`ClassManager` object
:param size: the total size of the buffer
:type size: int
:param insn: a raw buffer where are the instructions
:type insn: string
:param idx: a start address in the buffer
:type idx: int
:rtype: a generator of :class:`Instruction` objects
"""
self.odex = cm.get_odex_format()
max_idx = size * calcsize('=H')
if max_idx > len(insn):
max_idx = len(insn)
# Get instructions
while idx < max_idx:
obj = None
classic_instruction = True
op_value = insn[idx]
#print "%x %x" % (op_value, idx)
        # payload instructions or extended/optimized instructions
if (op_value == 0x00 or op_value == 0xff) and ((idx + 2) < max_idx):
op_value = unpack('=H', insn[idx:idx + 2])[0]
# payload instructions ?
if op_value in DALVIK_OPCODES_PAYLOAD:
try:
obj = get_instruction_payload(op_value, insn[idx:])
classic_instruction = False
except struct.error:
warning("error while decoding instruction ...")
elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH:
try:
obj = get_extented_instruction(cm, op_value, insn[idx:])
classic_instruction = False
except struct.error as why:
warning("error while decoding instruction ..." +
why.__str__())
# optimized instructions ?
elif self.odex and (op_value in DALVIK_OPCODES_OPTIMIZED):
obj = get_optimized_instruction(cm, op_value, insn[idx:])
classic_instruction = False
# classical instructions
if classic_instruction:
op_value = insn[idx]
obj = get_instruction(cm, op_value, insn[idx:], self.odex)
# emit instruction
yield obj
idx = idx + obj.get_length() | 0.002575 |
def execution_minutes_for_session(self, session_label):
"""
Given a session label, return the execution minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
    pd.DatetimeIndex
All the execution minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.execution_time_from_open(
self.schedule.at[session_label, 'market_open'],
),
end_minute=self.execution_time_from_close(
self.schedule.at[session_label, 'market_close'],
),
) | 0.00267 |
def is_cdl(filename):
'''
Quick check for .cdl ascii file
Example:
netcdf sample_file {
dimensions:
name_strlen = 7 ;
time = 96 ;
variables:
float lat ;
lat:units = "degrees_north" ;
lat:standard_name = "latitude" ;
lat:long_name = "station latitude" ;
etc...
:param str filename: Absolute path of file to check
'''
if os.path.splitext(filename)[-1] != '.cdl':
return False
with open(filename, 'rb') as f:
data = f.read(32)
if data.startswith(b'netcdf') or b'dimensions' in data:
return True
return False | 0.001346 |
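A short usage sketch for is_cdl (the temporary directory and file contents below are illustrative assumptions):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
cdl_path = os.path.join(tmpdir, 'sample.cdl')
with open(cdl_path, 'w') as f:
    f.write('netcdf sample_file {\ndimensions:\n    time = 96 ;\n}\n')
assert is_cdl(cdl_path) is True
assert is_cdl(cdl_path + '.txt') is False  # wrong extension, rejected before the file is read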
def get_object_closure(subject, object_category=None, **kwargs):
"""
Find all terms used to annotate subject plus ancestors
"""
results = search_associations(subject=subject,
object_category=object_category,
select_fields=[],
facet_fields=[M.OBJECT_CLOSURE],
facet_limit=-1,
rows=0,
**kwargs)
return set(results['facet_counts'][M.OBJECT_CLOSURE].keys()) | 0.001733 |
def read_projection_from_fits(fitsfile, extname=None):
"""
Load a WCS or HPX projection.
"""
f = fits.open(fitsfile)
nhdu = len(f)
# Try and get the energy bounds
try:
ebins = find_and_read_ebins(f)
except:
ebins = None
if extname is None:
# If there is an image in the Primary HDU we can return a WCS-based
# projection
if f[0].header['NAXIS'] != 0:
proj = WCS(f[0].header)
return proj, f, f[0]
else:
if f[extname].header['XTENSION'] == 'IMAGE':
proj = WCS(f[extname].header)
return proj, f, f[extname]
elif extname in ['SKYMAP', 'SKYMAP2']:
proj = HPX.create_from_hdu(f[extname], ebins)
return proj, f, f[extname]
elif f[extname].header['XTENSION'] == 'BINTABLE':
try:
if f[extname].header['PIXTYPE'] == 'HEALPIX':
proj = HPX.create_from_hdu(f[extname], ebins)
return proj, f, f[extname]
except:
pass
return None, f, None
# Loop on HDU and look for either an image or a table with HEALPix data
for i in range(1, nhdu):
# if there is an image we can return a WCS-based projection
if f[i].header['XTENSION'] == 'IMAGE':
proj = WCS(f[i].header)
return proj, f, f[i]
elif f[i].header['XTENSION'] == 'BINTABLE':
if f[i].name in ['SKYMAP', 'SKYMAP2']:
proj = HPX.create_from_hdu(f[i], ebins)
return proj, f, f[i]
try:
if f[i].header['PIXTYPE'] == 'HEALPIX':
proj = HPX.create_from_hdu(f[i], ebins)
return proj, f, f[i]
except:
pass
return None, f, None | 0.002185 |
def cable_to_text(cable, include_header):
"""\
Returns the header/content of the cable as text.
"""
if include_header:
        return u'\n\n'.join((cable.header, cable.content))  # join expects a single iterable
return cable.content | 0.00463 |
def asyncPipeStrreplace(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **kwargs)
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT)) | 0.001168 |
def _mangle_sentences_from_file(input_file):
"""Write participle phrase file"""
try:
with open(input_file, 'r') as f:
# final sentence may not be a complete sentence, save and prepend to next chunk
leftovers = ''
sentence_no = 0
for chunk in read_in_chunks(f): # lazy way of reading our file in case it's large
# prepend leftovers to chunk
chunk = chunk.decode('utf8')
chunk = leftovers + chunk
                chunk = chunk.replace(';', '.')  # replace semicolons with periods
doc = nlp(chunk)
                # the last sentence may not be complete; carry it over to the next chunk
sents = [sent.string.strip() for sent in doc.sents]
if len(sents) > 1:
leftovers = sents[-1] + chunk.rpartition(sents[-1])[-1]
sents = sents[:-1]
for sent in sents:
sent = sent.replace('\n', ' ')
sent = sent.replace('\r', ' ')
sent = re.sub( '\s+', ' ', sent ).strip()
if len(sent) < 5:
continue # skip tiny sentences
# add original sentence to database
cursor.execute('''insert into orignal_sentences (sentence,
flesch_reading_ease, flesch_kincaid_grade_level) values (?,
?, ?)''', (sent, textstat.flesch_reading_ease(sent),
textstat.flesch_kincaid_grade(sent)))
og_sent_id = cursor.lastrowid
for mangled_sentence in mangle_agreement(sent):
# add mangled sentence to database
cursor.execute('''insert into mangled_sentences
(original_sentence_id, sentence) values (?, ?)''',
(og_sent_id, mangled_sentence))
conn.commit() # commit all sentences in this chunk
except Exception as e:
print('error on {}'.format(input_file))
print(e) | 0.006676 |
def agp(args):
"""
%prog agp tpffile certificatefile agpfile
Build agpfile from overlap certificates.
Tiling Path File (tpf) is a file that lists the component and the gaps.
It is a three-column file similar to below, also see jcvi.formats.agp.tpf():
telomere chr1 na
AC229737.8 chr1 +
AC202463.29 chr1 +
Note: the orientation of the component is only used as a guide. If the
orientation is derivable from a terminal overlap, it will use it regardless
of what the tpf says.
See jcvi.assembly.goldenpath.certificate() which generates a list of
certificates based on agpfile. At first, it seems counter-productive to
convert first agp to certificates then certificates back to agp.
The certificates provide a way to edit the overlap information, so that the
agpfile can be corrected (without changing agpfile directly).
"""
from jcvi.formats.base import DictFile
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
tpffile, certificatefile, agpfile = args
orientationguide = DictFile(tpffile, valuepos=2)
cert = Certificate(certificatefile)
cert.write_AGP(agpfile, orientationguide=orientationguide) | 0.001546 |
def to_dict(self):
"""
For backwards compatibility
"""
plain_dict = dict()
for k, v in self.items():
if self.__fields__[k].is_list:
if isinstance(self.__fields__[k], ViewModelField):
plain_dict[k] = tuple(vt.to_dict() for vt in v)
continue
plain_dict[k] = tuple(copy.deepcopy(vt) for vt in v)
continue
if isinstance(self.__fields__[k], ViewModelField):
plain_dict[k] = v.to_dict()
continue
plain_dict[k] = copy.deepcopy(v)
return plain_dict | 0.003086 |
def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):
"""Determines date values.
Args:
number_of_days (int): number of days since epoch.
date_time_epoch (DateTimeEpoch): date and time of the epoch.
Returns:
tuple[int, int, int]: year, month, day of month.
"""
return self._GetDateValues(
number_of_days, date_time_epoch.year, date_time_epoch.month,
date_time_epoch.day_of_month) | 0.002227 |
def start(self):
'''Starts measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
'''
if self.maxval is None:
self.maxval = self._DEFAULT_MAXVAL
self.num_intervals = max(100, self.term_width)
self.next_update = 0
if self.maxval is not UnknownLength:
if self.maxval < 0: raise ValueError('Value out of range')
self.update_interval = self.maxval / self.num_intervals
self.start_time = self.last_update_time = time.time()
self.update(0)
return self | 0.005168 |
def execute(self, *args, **options):
'''Placing this in execute because then subclass handle() don't have to call super'''
if options['verbose']:
options['verbosity'] = 3
if options['quiet']:
options['verbosity'] = 0
self.verbosity = options.get('verbosity', 1)
super().execute(*args, **options) | 0.008357 |
def btc_tx_sign_all_unsigned_inputs(private_key_info, prev_outputs, unsigned_tx_hex, scriptsig_type=None, segwit=None, **blockchain_opts):
"""
Sign all unsigned inputs with a given key.
Use the given outputs to fund them.
@private_key_info: either a hex private key, or a dict with 'private_keys' and 'redeem_script'
defined as keys.
@prev_outputs: a list of {'out_script': xxx, 'value': xxx} that are in 1-to-1 correspondence with the unsigned inputs in the tx ('value' is in satoshis)
    @unsigned_tx_hex: hex transaction with unsigned inputs
Returns: signed hex transaction
"""
if segwit is None:
segwit = get_features('segwit')
txobj = btc_tx_deserialize(unsigned_tx_hex)
inputs = txobj['ins']
if scriptsig_type is None:
scriptsig_type = btc_privkey_scriptsig_classify(private_key_info)
tx_hex = unsigned_tx_hex
prevout_index = 0
# import json
# print ''
# print 'transaction:\n{}'.format(json.dumps(btc_tx_deserialize(unsigned_tx_hex), indent=4, sort_keys=True))
# print 'prevouts:\n{}'.format(json.dumps(prev_outputs, indent=4, sort_keys=True))
# print ''
for i, inp in enumerate(inputs):
do_witness_script = segwit
if inp.has_key('witness_script'):
do_witness_script = True
elif segwit:
# all inputs must receive a witness script, even if it's empty
inp['witness_script'] = ''
if (inp['script'] and len(inp['script']) > 0) or (inp.has_key('witness_script') and len(inp['witness_script']) > 0):
continue
if prevout_index >= len(prev_outputs):
raise ValueError("Not enough prev_outputs ({} given, {} more prev-outputs needed)".format(len(prev_outputs), len(inputs) - prevout_index))
# tx with index i signed with privkey
tx_hex = btc_tx_sign_input(str(unsigned_tx_hex), i, prev_outputs[prevout_index]['out_script'], prev_outputs[prevout_index]['value'], private_key_info, segwit=do_witness_script, scriptsig_type=scriptsig_type)
unsigned_tx_hex = tx_hex
prevout_index += 1
return tx_hex | 0.00698 |
def register_custom_type(
self, cls: type, marshaller: Optional[Callable[[Any], Any]] = default_marshaller,
unmarshaller: Union[Callable[[Any, Any], None],
Callable[[Any], Any], None] = default_unmarshaller, *,
typename: str = None, wrap_state: bool = True) -> None:
"""
Register a marshaller and/or unmarshaller for the given class.
The state object returned by the marshaller and passed to the unmarshaller can be any
serializable type. Usually a dictionary mapping of attribute names to values is used.
.. warning:: Registering marshallers/unmarshallers for any custom type will override any
serializer specific encoding/decoding hooks (respectively) already in place!
:param cls: the class to register
:param marshaller: a callable that takes the object to be marshalled as the argument and
returns a state object
:param unmarshaller: a callable that either:
* takes an uninitialized instance of ``cls`` and its state object as arguments and
restores the state of the object
* takes a state object and returns a new instance of ``cls``
:param typename: a unique identifier for the type (defaults to the ``module:varname``
reference to the class)
:param wrap_state: ``True`` to wrap the marshalled state before serialization so that it
can be recognized later for unmarshalling, ``False`` to serialize it as is
"""
assert check_argument_types()
typename = typename or qualified_name(cls)
if marshaller:
self.marshallers[cls] = typename, marshaller, wrap_state
self.custom_type_codec.register_object_encoder_hook(self)
if unmarshaller and self.custom_type_codec is not None:
target_cls = cls # type: Optional[type]
if len(signature(unmarshaller).parameters) == 1:
target_cls = None
self.unmarshallers[typename] = target_cls, unmarshaller
self.custom_type_codec.register_object_decoder_hook(self) | 0.005991 |
def virsh_version(self,
host_list=None,
remote_user=None,
remote_pass=None,
sudo=False,
sudo_user=None,
sudo_pass=None):
'''
Get the virsh version
'''
host_list, remote_user, remote_pass, \
sudo, sudo_user, sudo_pass = self.get_validated_params(
host_list, remote_user, remote_pass, sudo, sudo_user,
sudo_pass)
result, failed_hosts = self.runner.ansible_perform_operation(
host_list=host_list,
remote_user=remote_user,
remote_pass=remote_pass,
module="command",
module_args="virsh version",
sudo=sudo,
sudo_user=sudo_user,
sudo_pass=sudo_pass)
virsh_result = None
if result['contacted'].keys():
virsh_result = {}
for node in result['contacted'].keys():
nodeobj = result['contacted'][node]
jsonoutput = rex.parse_lrvalue_string(nodeobj['stdout'], ":")
virsh_result[node] = {}
virsh_result[node]['result'] = jsonoutput
return virsh_result | 0.007126 |
def create(graph, label_field,
threshold=1e-3,
weight_field='',
self_weight=1.0,
undirected=False,
max_iterations=None,
_single_precision=False,
_distributed='auto',
verbose=True):
"""
Given a weighted graph with observed class labels of a subset of vertices,
infer the label probability for the unobserved vertices using the
"label propagation" algorithm.
The algorithm iteratively updates the label probability of current vertex
as a weighted sum of label probability of self and the neighboring vertices
until converge. See
:class:`turicreate.label_propagation.LabelPropagationModel` for the details
of the algorithm.
    Notes: label propagation works well with a small number of labels, i.e. binary
    labels, or fewer than 1000 classes. The toolkit will throw an error
    if the number of classes exceeds the maximum value (1000).
Parameters
----------
graph : SGraph
The graph on which to compute the label propagation.
label_field: str
        Vertex field storing the initial vertex labels. The values must be in
        [0, num_classes). None values indicate unobserved vertex labels.
threshold : float, optional
Threshold for convergence, measured in the average L2 norm
(the sum of squared values) of the delta of each vertex's
label probability vector.
max_iterations: int, optional
The max number of iterations to run. Default is unlimited.
If set, the algorithm terminates when either max_iterations
or convergence threshold is reached.
weight_field: str, optional
Vertex field for edge weight. If empty, all edges are assumed
to have unit weight.
self_weight: float, optional
The weight for self edge.
undirected: bool, optional
If true, treat each edge as undirected, and propagates label in
both directions.
_single_precision : bool, optional
If true, running label propagation in single precision. The resulting
        probability values may be less accurate, but should run faster
and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LabelPropagationModel
References
----------
- Zhu, X., & Ghahramani, Z. (2002). `Learning from labeled and unlabeled data
with label propagation <http://www.cs.cmu.edu/~zhuxj/pub/CMU-CALD-02-107.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.label_propagation.LabelPropagationModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz',
... format='snap')
# Initialize random classes for a subset of vertices
# Leave the unobserved vertices with None label.
>>> import random
>>> def init_label(vid):
... x = random.random()
... if x < 0.2:
... return 0
... elif x > 0.9:
... return 1
... else:
... return None
>>> g.vertices['label'] = g.vertices['__id'].apply(init_label, int)
>>> m = turicreate.label_propagation.create(g, label_field='label')
We can obtain for each vertex the predicted label and the probability of
each label in the graph ``g`` using:
>>> labels = m['labels'] # SFrame
>>> labels
+------+-------+-----------------+-------------------+----------------+
| __id | label | predicted_label | P0 | P1 |
+------+-------+-----------------+-------------------+----------------+
| 5 | 1 | 1 | 0.0 | 1.0 |
| 7 | None | 0 | 0.8213214997 | 0.1786785003 |
| 8 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 10 | None | 0 | 0.534984718273 | 0.465015281727 |
| 27 | None | 0 | 0.752801638549 | 0.247198361451 |
| 29 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 33 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 47 | 0 | 0 | 1.0 | 0.0 |
| 50 | None | 0 | 0.788279032657 | 0.211720967343 |
| 52 | None | 0 | 0.666666666667 | 0.333333333333 |
+------+-------+-----------------+-------------------+----------------+
[36692 rows x 5 columns]
See Also
--------
LabelPropagationModel
"""
from turicreate._cython.cy_server import QuietProgress
_raise_error_if_not_of_type(label_field, str)
_raise_error_if_not_of_type(weight_field, str)
if not isinstance(graph, _SGraph):
raise TypeError('graph input must be a SGraph object.')
if graph.vertices[label_field].dtype != int:
raise TypeError('label_field %s must be integer typed.' % label_field)
opts = {'label_field': label_field,
'threshold': threshold,
'weight_field': weight_field,
'self_weight': self_weight,
'undirected': undirected,
'max_iterations': max_iterations,
'single_precision': _single_precision,
'graph': graph.__proxy__}
with QuietProgress(verbose):
params = _tc.extensions._toolkits.graph.label_propagation.create(opts)
model = params['model']
return LabelPropagationModel(model) | 0.001069 |
def get_qpimage_raw(self, idx):
"""Return QPImage without background correction"""
ds = self._get_dataset(idx)
qpi = ds.get_qpimage_raw()
qpi["identifier"] = self.get_identifier(idx)
return qpi | 0.008584 |
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2016-11-04 - Written - Bovy (UofT/CCA)
"""
x,y,z= self._compute_xyz(R,phi,z,t)
zc= numpy.sqrt(z**2.+self._c2)
bzc2= (self._b+zc)**2.
bigA= self._b*y**2.+(self._b+3.*zc)*bzc2
bigC= y**2.+bzc2
return self._c2/24./numpy.pi/self._a/bigC**2./zc**3.\
*((x+self._a)*(3.*bigA*bigC+(2.*bigA+self._b*bigC)*(x+self._a)**2.)\
/(bigC+(x+self._a)**2.)**1.5\
-(x-self._a)*(3.*bigA*bigC+(2.*bigA+self._b*bigC)*(x-self._a)**2.)\
/(bigC+(x-self._a)**2.)**1.5) | 0.030803 |
def create(self, repo_slug=None, key=None, label=None):
""" Associate an ssh key with your repo and return it.
"""
key = '%s' % key
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('SET_DEPLOY_KEY',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('POST',
url,
auth=self.bitbucket.auth,
key=key,
label=label) | 0.003091 |
def neighbours(healpix_index, nside, order='ring'):
"""
Find all the HEALPix pixels that are the neighbours of a HEALPix pixel
Parameters
----------
healpix_index : `~numpy.ndarray`
Array of HEALPix pixels
nside : int
Number of pixels along the side of each of the 12 top-level HEALPix tiles
order : { 'nested' | 'ring' }
Order of HEALPix pixels
Returns
-------
neigh : `~numpy.ndarray`
Array giving the neighbours starting SW and rotating clockwise. This has
one extra dimension compared to ``healpix_index`` - the first dimension -
which is set to 8. For example if healpix_index has shape (2, 3),
``neigh`` has shape (8, 2, 3).
"""
_validate_nside(nside)
nside = np.asarray(nside, dtype=np.intc)
if _validate_order(order) == 'ring':
func = _core.neighbours_ring
else: # _validate_order(order) == 'nested'
func = _core.neighbours_nested
return np.stack(func(healpix_index, nside)) | 0.003902 |
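Assuming this is the astropy-healpix implementation (the import path below is an assumption), a quick usage sketch:

import numpy as np
from astropy_healpix import neighbours  # assumed import path

neigh = neighbours(np.array([0, 7]), nside=4, order='ring')
print(neigh.shape)  # (8, 2): eight neighbours per input pixel, stacked along a new first axis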
def new_dataset(args):
"""
lexibank new-dataset OUTDIR [ID]
"""
if not args.args:
raise ParserError('you must specify an existing directory')
outdir = Path(args.args.pop(0))
if not outdir.exists():
raise ParserError('you must specify an existing directory')
id_pattern = re.compile('[a-z_0-9]+$')
md = {}
if args.args:
md['id'] = args.args.pop(0)
else:
md['id'] = input('Dataset ID: ')
while not id_pattern.match(md['id']):
print('dataset id must only consist of lowercase ascii letters, digits and _ (underscore)!')
md['id'] = input('Dataset ID: ')
outdir = outdir / md['id']
if not outdir.exists():
outdir.mkdir()
for key in ['title', 'url', 'license', 'conceptlist', 'citation']:
md[key] = input('Dataset {0}: '.format(key))
# check license!
# check conceptlist!
for path in Path(pylexibank.__file__).parent.joinpath('dataset_template').iterdir():
if path.is_file():
if path.suffix in ['.pyc']:
continue # pragma: no cover
target = path.name
content = read_text(path)
if '+' in path.name:
target = re.sub(
'\+([a-z]+)\+',
lambda m: '{' + m.groups()[0] + '}',
path.name
).format(**md)
if target.endswith('_tmpl'):
target = target[:-5]
content = content.format(**md)
write_text(outdir / target, content)
else:
target = outdir / path.name
if target.exists():
shutil.rmtree(str(target))
shutil.copytree(str(path), str(target))
del md['id']
jsonlib.dump(md, outdir / 'metadata.json', indent=4) | 0.00275 |
def int(self, item, default=None):
""" Return value of key as an int
:param item: key of value to transform
:param default: value to return if item does not exist
:return: int of value
"""
try:
item = self.__getattr__(item)
except AttributeError as err:
if default is not None:
return default
raise err
return int(item) | 0.004587 |
def is_bit_mask(enumeration, potential_mask):
"""
A utility function that checks if the provided value is a composite bit
mask of enumeration values in the specified enumeration class.
Args:
enumeration (class): One of the mask enumeration classes found in this
file. These include:
* Cryptographic Usage Mask
* Protection Storage Mask
* Storage Status Mask
potential_mask (int): A potential bit mask composed of enumeration
values belonging to the enumeration class.
Returns:
True: if the potential mask is a valid bit mask of the mask enumeration
False: otherwise
"""
if not isinstance(potential_mask, six.integer_types):
return False
mask_enumerations = (
CryptographicUsageMask,
ProtectionStorageMask,
StorageStatusMask
)
if enumeration not in mask_enumerations:
return False
mask = 0
for value in [e.value for e in enumeration]:
if (value & potential_mask) == value:
mask |= value
if mask != potential_mask:
return False
return True | 0.000854 |
def from_inline(cls: Type[CertificationType], version: int, currency: str, blockhash: Optional[str],
inline: str) -> CertificationType:
"""
Return Certification instance from inline document
Only self.pubkey_to is populated.
You must populate self.identity with an Identity instance to use raw/sign/signed_raw methods
:param version: Version of document
:param currency: Name of the currency
:param blockhash: Hash of the block
:param inline: Inline document
:return:
"""
cert_data = Certification.re_inline.match(inline)
if cert_data is None:
raise MalformedDocumentError("Certification ({0})".format(inline))
pubkey_from = cert_data.group(1)
pubkey_to = cert_data.group(2)
blockid = int(cert_data.group(3))
if blockid == 0 or blockhash is None:
timestamp = BlockUID.empty()
else:
timestamp = BlockUID(blockid, blockhash)
signature = cert_data.group(4)
return cls(version, currency, pubkey_from, pubkey_to, timestamp, signature) | 0.005263 |
def auth_list(**kwargs):
"""
Shows available authorization groups.
"""
ctx = Context(**kwargs)
ctx.execute_action('auth:group:list', **{
'storage': ctx.repo.create_secure_service('storage'),
}) | 0.004444 |
def assume_script(self) -> 'Language':
"""
Fill in the script if it's missing, and if it can be assumed from the
language subtag. This is the opposite of `simplify_script`.
>>> Language.make(language='en').assume_script()
Language.make(language='en', script='Latn')
>>> Language.make(language='yi').assume_script()
Language.make(language='yi', script='Hebr')
>>> Language.make(language='yi', script='Latn').assume_script()
Language.make(language='yi', script='Latn')
This fills in nothing when the script cannot be assumed -- such as when
the language has multiple scripts, or it has no standard orthography:
>>> Language.make(language='sr').assume_script()
Language.make(language='sr')
>>> Language.make(language='eee').assume_script()
Language.make(language='eee')
    It also doesn't fill anything in when the language is unspecified.
>>> Language.make(region='US').assume_script()
Language.make(region='US')
"""
if self._assumed is not None:
return self._assumed
if self.language and not self.script:
try:
self._assumed = self.update_dict({'script': DEFAULT_SCRIPTS[self.language]})
except KeyError:
self._assumed = self
else:
self._assumed = self
return self._assumed | 0.002085 |
def issue(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None, ForceIssue=False, UserID=None):
""" ๋ฐํ
args
CorpNum : ํ์ ์ฌ์
์ ๋ฒํธ
MgtKeyType : ๊ด๋ฆฌ๋ฒํธ ์ ํ one of ['SELL','BUY','TRUSTEE']
MgtKey : ํํธ๋ ๊ด๋ฆฌ๋ฒํธ
Memo : ์ฒ๋ฆฌ ๋ฉ๋ชจ
EmailSubject : ๋ฐํ๋ฉ์ผ ์ด๋ฉ์ผ ์ ๋ชฉ
ForceIssue : ์ง์ฐ๋ฐํ ์ธ๊ธ๊ณ์ฐ์ ๊ฐ์ ๋ฐํ ์ฌ๋ถ.
UserID : ํ๋น ํ์์์ด๋
return
์ฒ๋ฆฌ๊ฒฐ๊ณผ. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "๊ด๋ฆฌ๋ฒํธ ํํ๊ฐ ์ฌ๋ฐ๋ฅด์ง ์์ต๋๋ค.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "๊ด๋ฆฌ๋ฒํธ๊ฐ ์
๋ ฅ๋์ง ์์์ต๋๋ค.")
req = {"forceIssue": ForceIssue}
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "ISSUE") | 0.006019 |
def normalize(self):
"Return my probabilities; must be down to one variable."
assert len(self.vars) == 1
return ProbDist(self.vars[0],
dict((k, v) for ((k,), v) in self.cpt.items())) | 0.008696 |
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_ | 0.002488 |
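For illustration, the default branch above (with include_bool=True) behaves like this public-API sketch, which is my own addition rather than pandas code:

import numpy as np
import pandas as pd
from pandas import IndexSlice

df = pd.DataFrame({'num': [1.0, 2.0], 'flag': [True, False], 'txt': ['a', 'b']})
slice_ = IndexSlice[:, df.select_dtypes(include=[np.number, bool]).columns]
print(df.loc[slice_])  # keeps only the numeric 'num' and boolean 'flag' columns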
def check_ns_run_members(run):
"""Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties.
"""
run_keys = list(run.keys())
# Mandatory keys
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert key in run_keys
run_keys.remove(key)
# Optional keys
for key in ['output']:
try:
run_keys.remove(key)
except ValueError:
pass
# Check for unexpected keys
assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)
# Check type of mandatory members
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert isinstance(run[key], np.ndarray), (
key + ' is type ' + type(run[key]).__name__)
# check shapes of keys
assert run['logl'].ndim == 1
assert run['logl'].shape == run['nlive_array'].shape
assert run['logl'].shape == run['thread_labels'].shape
assert run['theta'].ndim == 2
assert run['logl'].shape[0] == run['theta'].shape[0] | 0.000809 |
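A minimal run dictionary that satisfies these checks might look like the sketch below (dummy values added for illustration):

import numpy as np

n = 4
run = {
    'logl': np.arange(n, dtype=float),
    'nlive_array': np.full(n, 2.0),
    'theta': np.zeros((n, 3)),
    'thread_labels': np.zeros(n, dtype=int),
    'thread_min_max': np.asarray([[-np.inf, 1.5], [-np.inf, 3.0]]),
}
check_ns_run_members(run)  # passes silently; a malformed run raises AssertionError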
def scale_axes_from_data(self):
"""Restrict data limits for Y-axis based on what you can see
"""
# get tight limits for X-axis
if self.args.xmin is None:
self.args.xmin = min(fs.xspan[0] for fs in self.spectra)
if self.args.xmax is None:
self.args.xmax = max(fs.xspan[1] for fs in self.spectra)
# autoscale view for Y-axis
cropped = [fs.crop(self.args.xmin, self.args.xmax) for
fs in self.spectra]
ymin = min(fs.value.min() for fs in cropped)
ymax = max(fs.value.max() for fs in cropped)
self.plot.gca().yaxis.set_data_interval(ymin, ymax, ignore=True)
self.plot.gca().autoscale_view(scalex=False) | 0.002743 |
def show_fibrechannel_interface_info_output_show_fibrechannel_interface_show_fibrechannel_info_port_interface(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fibrechannel_interface_info = ET.Element("show_fibrechannel_interface_info")
config = show_fibrechannel_interface_info
output = ET.SubElement(show_fibrechannel_interface_info, "output")
show_fibrechannel_interface = ET.SubElement(output, "show-fibrechannel-interface")
portsgroup_rbridgeid_key = ET.SubElement(show_fibrechannel_interface, "portsgroup-rbridgeid")
portsgroup_rbridgeid_key.text = kwargs.pop('portsgroup_rbridgeid')
show_fibrechannel_info = ET.SubElement(show_fibrechannel_interface, "show-fibrechannel-info")
port_index_key = ET.SubElement(show_fibrechannel_info, "port-index")
port_index_key.text = kwargs.pop('port_index')
port_interface = ET.SubElement(show_fibrechannel_info, "port-interface")
port_interface.text = kwargs.pop('port_interface')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006926 |
def _append(self, menu):
'''append this menu item to a menu'''
menu.Append(self.id(), self.name, self.description) | 0.015385 |
def _gen(self, optimized, splitstring):
"""Generates a new random object generated from the nonterminal
Args:
optimized (bool): mode of operation - if enabled not all
CNF rules are included (mitigate O(n^3))
splitstring (bool): A boolean for enabling or disabling
Returns:
str: The generated string
"""
# Define Dictionary that holds resolved rules
# (only in form A -> terminals sequence)
self.resolved = {}
# First update Resolved dictionary by adding rules
# that contain only terminals (resolved rules)
for nt in self.grammar.grammar_nonterminals_map:
for i in self.grammar.grammar_nonterminals_map[nt]:
if self.grammar.grammar_rules[i][0] not in self.resolved\
and not isinstance(self.grammar.grammar_rules[i][1], (set, tuple)):
if self.grammar.grammar_rules[i][1] != '@empty_set' \
and self.grammar.grammar_rules[i][1] in self.grammar.grammar_terminals:
if splitstring:
self.resolved[
self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]
else:
if self.grammar.grammar_rules[i][1] == '&':
self.resolved[self.grammar.grammar_rules[i][0]] = ' '
else:
self.resolved[
self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]
# print 'ResolvingA '+self.g.Rules[i][0]+": "+
# self.g.Rules[i][1]
if self._checkfinal(self.grammar.grammar_rules[i][0]):
return self.resolved[self.grammar.grammar_rules[i][0]]
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
if self.grammar.grammar_rules[i][1] == '@empty_set':
self.resolved[self.grammar.grammar_rules[i][0]] = ''
# print 'ResolvingB '+self.g.Rules[i][0]+": "
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
if optimized and self._check_self_to_empty(
self.grammar.grammar_rules[i][1]):
self.resolved[self.grammar.grammar_rules[i][0]] = ''
# print 'ResolvingC '+self.g.Rules[i][0]+": "
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
# Then try to use the rules from Resolved dictionary and check
# if there is another rule that can be resolved.
# This should be done in a while loop
change = 1
while change:
change = 0
if not change:
ret = self._check_self_nonterminals(optimized)
if ret == 1:
change = 1
elif ret != 0:
return ret
if not change:
while not change and len(self.bfs_queue) > 0:
myntr = self.bfs_queue.pop()
ret = self._check_self_replicate(myntr)
if ret == 1:
change = 1
elif ret != 0:
return ret
if optimized and self._check_intemediate(
myntr, self.maxstate):
change = 1
break | 0.003147 |
def plot_ecg_pan_tompkins_steps(time, orig_ecg, pre_process_ecg, sampling_rate, titles):
"""
-----
Brief
-----
With this plotting function it will be possible to plot simultaneously (in pairs) "Original"/
"Filtered"/"Differentiated"/"Rectified"/"Integrated" ECG signals used at "Pan-Tompkins R Peaks
Detection Algorithm".
-----------
Description
-----------
    Function intended to generate a Bokeh figure with a 1x2 format, 1 being the number of rows and 2
    the number of columns.
At the first column is plotted the ECG signal resulting from pre-process step i while in the second
column it is presented the ECG signal resulting from pre-process step i+1.
Applied in the Notebook titled "Event Detection - R Peaks (ECG)".
----------
Parameters
----------
time : list
List containing the time-axis sequence of values.
orig_ecg : list
Sequence of sampled values (Original ECG).
pre_process_ecg : list
Sequence of sampled values (Pre-Processed ECG).
sampling_rate : int
Acquisition sampling rate (Hz)
titles : list
List containing the title of each subplot.
"""
if len(titles) == 2:
# Representation of the output of Step 1 of Pan-Tompkins R-Peak Detection Algorithm.
# List that store the figure handler
list_figures_1 = [[]]
# Plotting of Original Signal
list_figures_1[-1].append(figure(x_axis_label='Time (s)', y_axis_label='Raw Data',
title=titles[0], **opensignals_kwargs("figure")))
list_figures_1[-1][-1].line(time[:sampling_rate], orig_ecg[:sampling_rate], **opensignals_kwargs("line"))
# Plotting of Filtered Signal
list_figures_1[-1].append(figure(x_axis_label='Time (s)', y_axis_label='Raw Data',
title=titles[1], **opensignals_kwargs("figure")))
list_figures_1[-1][-1].line(time[:sampling_rate], pre_process_ecg[:sampling_rate], **opensignals_kwargs("line"))
# Grid-Plot.
opensignals_style([item for sublist in list_figures_1 for item in sublist])
grid_plot_1 = gridplot(list_figures_1, **opensignals_kwargs("gridplot"))
show(grid_plot_1)
else:
raise RuntimeError("The field 'title' must be a list of strings with size 2 !") | 0.006743 |
def read_status(self, num_bytes=2):
"""Read up to 24 bits (num_bytes) of SPI flash status register contents
via RDSR, RDSR2, RDSR3 commands
Not all SPI flash supports all three commands. The upper 1 or 2
bytes may be 0xFF.
"""
SPIFLASH_RDSR = 0x05
SPIFLASH_RDSR2 = 0x35
SPIFLASH_RDSR3 = 0x15
status = 0
shift = 0
for cmd in [SPIFLASH_RDSR, SPIFLASH_RDSR2, SPIFLASH_RDSR3][0:num_bytes]:
status += self.run_spiflash_command(cmd, read_bits=8) << shift
shift += 8
return status | 0.006711 |
def calcPeptideMass(peptide, **kwargs):
"""Calculate the mass of a peptide.
:param aaMass: A dictionary with the monoisotopic masses of amino acid
residues, by default :attr:`maspy.constants.aaMass`
:param aaModMass: A dictionary with the monoisotopic mass changes of
modications, by default :attr:`maspy.constants.aaModMass`
:param elementMass: A dictionary with the masses of chemical elements, by
default ``pyteomics.mass.nist_mass``
:param peptide: peptide sequence, modifications have to be written in the
format "[modificationId]" and "modificationId" has to be present in
:attr:`maspy.constants.aaModMass`
#TODO: change to a more efficient way of calculating the modified mass, by
first extracting all present modifications and then looking up their masses.
"""
aaMass = kwargs.get('aaMass', maspy.constants.aaMass)
aaModMass = kwargs.get('aaModMass', maspy.constants.aaModMass)
elementMass = kwargs.get('elementMass', pyteomics.mass.nist_mass)
addModMass = float()
unmodPeptide = peptide
for modId, modMass in viewitems(aaModMass):
modSymbol = '[' + modId + ']'
numMod = peptide.count(modSymbol)
if numMod > 0:
unmodPeptide = unmodPeptide.replace(modSymbol, '')
addModMass += modMass * numMod
if unmodPeptide.find('[') != -1:
print(unmodPeptide)
raise Exception('The peptide contains modification, ' +
'not present in maspy.constants.aaModMass'
)
unmodPeptideMass = sum(aaMass[i] for i in unmodPeptide)
unmodPeptideMass += elementMass['H'][0][0]*2 + elementMass['O'][0][0]
modPeptideMass = unmodPeptideMass + addModMass
return modPeptideMass | 0.001122 |
def upload_file(target_filepath, metadata, access_token, base_url=OH_BASE_URL,
remote_file_info=None, project_member_id=None,
max_bytes=MAX_FILE_DEFAULT):
"""
Upload a file from a local filepath using the "direct upload" API.
To learn more about this API endpoint see:
* https://www.openhumans.org/direct-sharing/on-site-data-upload/
* https://www.openhumans.org/direct-sharing/oauth2-data-upload/
:param target_filepath: This field is the filepath of the file to be
uploaded
:param metadata: This field is a python dictionary with keys filename,
description and tags for single user upload and filename,
project member id, description and tags for multiple user upload.
:param access_token: This is user specific access token/master token.
:param base_url: It is this URL `https://www.openhumans.org`.
    :param remote_file_info: This field is for checking if a file with
matching name and file size already exists. Its default value is none.
:param project_member_id: This field is the list of project member id of
all members of a project. Its default value is None.
:param max_bytes: This field is the maximum file size a user can upload.
        Its default value is 128m.
"""
with open(target_filepath, 'rb') as stream:
filename = os.path.basename(target_filepath)
return upload_stream(stream, filename, metadata, access_token,
base_url, remote_file_info, project_member_id,
max_bytes, file_identifier=target_filepath) | 0.000614 |
def coverageInfo(self):
"""
Return information about the bases found at each location in our title
sequence.
@return: A C{dict} whose keys are C{int} subject offsets and whose
values are unsorted lists of (score, base) 2-tuples, giving all the
bases from reads that matched the subject at subject location,
along with the bit score of the matching read.
"""
result = defaultdict(list)
for titleAlignment in self:
for hsp in titleAlignment.hsps:
score = hsp.score.score
for (subjectOffset, base, _) in titleAlignment.read.walkHSP(
hsp, includeWhiskers=False):
result[subjectOffset].append((score, base))
return result | 0.002478 |
def __struct_params_s(obj, separator=', ', f=repr, fmt='%s = %s'):
"""method wrapper for printing all elements of a struct"""
s = separator.join([__single_param(obj, n, f, fmt) for n in dir(obj) if __inc_param(obj, n)])
return s | 0.008333 |
def get_cancer_types(cancer_filter=None):
"""Return a list of cancer types, optionally filtered.
Parameters
----------
cancer_filter : Optional[str]
A string used to filter cancer types. Its value is the name or
part of the name of a type of cancer. Example: "melanoma",
"pancreatic", "non-small cell lung"
Returns
-------
type_ids : list[str]
A list of cancer types matching the filter.
Example: for cancer_filter="pancreatic", the result includes
"panet" (neuro-endocrine) and "paad" (adenocarcinoma)
"""
data = {'cmd': 'getTypesOfCancer'}
df = send_request(**data)
res = _filter_data_frame(df, ['type_of_cancer_id'], 'name', cancer_filter)
type_ids = list(res['type_of_cancer_id'].values())
return type_ids | 0.001233 |
def _set_replicator(self, v, load=False):
"""
Setter method for replicator, mapped from YANG variable /tunnel_settings/system/tunnel/replicator (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_replicator is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_replicator() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=replicator.replicator, is_container='container', presence=False, yang_name="replicator", rest_name="replicator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NSX replicator tunnel related settings', u'hidden': u'debug', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """replicator must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=replicator.replicator, is_container='container', presence=False, yang_name="replicator", rest_name="replicator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NSX replicator tunnel related settings', u'hidden': u'debug', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
})
self.__replicator = t
if hasattr(self, '_set'):
self._set() | 0.005672 |
def run_direct(self, **kwargs):
"""
Run the motor at the duty cycle specified by `duty_cycle_sp`.
Unlike other run commands, changing `duty_cycle_sp` while running *will*
take effect immediately.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = self.COMMAND_RUN_DIRECT | 0.008403 |
def df_quantile(df, nb=100):
"""Returns the nb quantiles for datas in a dataframe
"""
quantiles = np.linspace(0, 1., nb)
res = pd.DataFrame()
for q in quantiles:
res = res.append(df.quantile(q), ignore_index=True)
return res | 0.003906 |
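A short usage sketch (assumes a pandas version where DataFrame.append is still available; it was removed in pandas 2.0):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': np.arange(10.0), 'b': np.arange(10.0) * 2})
q = df_quantile(df, nb=5)
print(q)  # five rows: the 0%, 25%, 50%, 75% and 100% quantiles of each column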
def calc_gradient_norm_for_replicates(self,
replicates='bootstrap',
ridge=None,
constrained_pos=None,
weights=None):
"""
Calculate the Euclidean-norm of the gradient of one's replicates, given
one's dataset.
Parameters
----------
replicates : str in {'bootstrap', 'jackknife'}.
Denotes which set of replicates should have their log-likelihoods
calculated.
ridge : float or None, optional.
Denotes the ridge penalty used when estimating the replicates, and
to be used when calculating the gradient. If None, no ridge penalty
is used. Default == None.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
`self.mle_params` Default == None.
weights : 1D ndarray or None, optional.
Allows for the calculation of weighted log-likelihoods. The weights
can represent various things. In stratified samples, the weights
may be the proportion of the observations in a given strata for a
sample in relation to the proportion of observations in that strata
in the population. In latent class models, the weights may be the
probability of being a particular class.
Returns
-------
        gradient_norms : 1D ndarray.
            Each element stores the Euclidean norm of the gradient evaluated at
            the associated replicated parameter vector on the model object's
            dataset.
"""
# Check the validity of the kwargs
ensure_replicates_kwarg_validity(replicates)
# Create the estimation object
estimation_obj =\
create_estimation_obj(self.model_obj,
self.mle_params.values,
ridge=ridge,
constrained_pos=constrained_pos,
weights=weights)
# Prepare the estimation object to calculate the gradients
if hasattr(estimation_obj, "set_derivatives"):
estimation_obj.set_derivatives()
# Get the array of parameter replicates
replicate_array = getattr(self, replicates + "_replicates").values
# Determine the number of replicates
num_reps = replicate_array.shape[0]
# Initialize an empty array to store the gradient norms
gradient_norms = np.empty((num_reps,), dtype=float)
# Create an iterable for iteration
iterable_for_iteration = PROGRESS(xrange(num_reps),
desc="Calculating Gradient Norms",
total=num_reps)
# Iterate through the rows of the replicates and calculate and store
# the gradient norm for each replicated parameter vector.
for row in iterable_for_iteration:
current_params = replicate_array[row]
gradient = estimation_obj.convenience_calc_gradient(current_params)
gradient_norms[row] = np.linalg.norm(gradient)
return gradient_norms | 0.001688 |
def newDocPI(self, name, content):
"""Creation of a processing instruction element. """
ret = libxml2mod.xmlNewDocPI(self._o, name, content)
if ret is None:raise treeError('xmlNewDocPI() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | 0.014599 |
def obfn_g1(self, Y1):
r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective
function.
"""
return np.linalg.norm((self.Pcn(Y1) - Y1)) | 0.011236 |
async def _dataobject_update_detect(self, _initialkeys, _savedresult):
"""
Coroutine that wait for retrieved value update notification
"""
def expr(newvalues, updatedvalues):
if any(v.getkey() in _initialkeys for v in updatedvalues if v is not None):
return True
else:
return self.shouldupdate(newvalues, updatedvalues)
while True:
updatedvalues, _ = await multiwaitif(_savedresult, self, expr, True)
if not self._updatedset:
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.DATAUPDATED))
self._updatedset.update(updatedvalues) | 0.007082 |
def has_basis_notes(family, data_dir=None):
'''Check if notes exist for a given basis set
Returns True if they exist, false otherwise
'''
file_path = _basis_notes_path(family, data_dir)
return os.path.isfile(file_path) | 0.004167 |
def get_list(self, size=100, startIndex=0, searchText="", sortProperty="", sortOrder='ASC', status='Active,Pending'):
"""
Request service locations
Returns
-------
dict
"""
url = urljoin(BASEURL, "sites", "list")
params = {
'api_key': self.token,
'size': size,
'startIndex': startIndex,
'sortOrder': sortOrder,
'status': status
}
if searchText:
params['searchText'] = searchText
if sortProperty:
params['sortProperty'] = sortProperty
r = requests.get(url, params)
r.raise_for_status()
return r.json() | 0.004274 |
def create(self, **kwargs):
"""
Creates a new statement matching the keyword arguments specified.
Returns the created statement.
"""
Statement = self.get_model('statement')
Tag = self.get_model('tag')
session = self.Session()
tags = set(kwargs.pop('tags', []))
if 'search_text' not in kwargs:
kwargs['search_text'] = self.tagger.get_bigram_pair_string(kwargs['text'])
if 'search_in_response_to' not in kwargs:
in_response_to = kwargs.get('in_response_to')
if in_response_to:
kwargs['search_in_response_to'] = self.tagger.get_bigram_pair_string(in_response_to)
statement = Statement(**kwargs)
for tag_name in tags:
tag = session.query(Tag).filter_by(name=tag_name).first()
if not tag:
# Create the tag
tag = Tag(name=tag_name)
statement.tags.append(tag)
session.add(statement)
session.flush()
session.refresh(statement)
statement_object = self.model_to_object(statement)
self._session_finish(session)
return statement_object | 0.003331 |
def file_or_token(value):
"""
If value is a file path and the file exists its contents are stripped and returned,
otherwise value is returned.
"""
if isfile(value):
with open(value) as fd:
return fd.read().strip()
if any(char in value for char in '/\\.'):
# This chars will never be in a token value, but may be in a path
# The error message will be handled by the parser
raise ValueError()
return value | 0.004193 |
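Usage sketch (the token string and temporary file below are illustrative assumptions):

import os
import tempfile

assert file_or_token('abc123token') == 'abc123token'  # no path characters, passed through unchanged

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fd:
    fd.write('  secret-from-file \n')
    token_path = fd.name
try:
    assert file_or_token(token_path) == 'secret-from-file'  # contents read from disk and stripped
finally:
    os.remove(token_path)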
def prepare_array(data, masked=True, nodata=0, dtype="int16"):
"""
Turn input data into a proper array for further usage.
    Output array is always 3-dimensional with the given data type. If the output
is masked, the fill_value corresponds to the given nodata value and the
nodata value will be burned into the data array.
Parameters
----------
data : array or iterable
array (masked or normal) or iterable containing arrays
nodata : integer or float
nodata value (default: 0) used if input is not a masked array and
for output array
masked : bool
return a NumPy Array or a NumPy MaskedArray (default: True)
dtype : string
data type of output array (default: "int16")
Returns
-------
array : array
"""
# input is iterable
if isinstance(data, (list, tuple)):
return _prepare_iterable(data, masked, nodata, dtype)
# special case if a 2D single band is provided
elif isinstance(data, np.ndarray) and data.ndim == 2:
data = ma.expand_dims(data, axis=0)
# input is a masked array
if isinstance(data, ma.MaskedArray):
return _prepare_masked(data, masked, nodata, dtype)
# input is a NumPy array
elif isinstance(data, np.ndarray):
if masked:
return ma.masked_values(data.astype(dtype, copy=False), nodata, copy=False)
else:
return data.astype(dtype, copy=False)
else:
raise ValueError(
"data must be array, masked array or iterable containing arrays."
) | 0.001269 |
def _newRepresentation(self, index, newIndex):
"""
Return a new representation for newIndex that overlaps with the
representation at index by exactly w-1 bits
"""
newRepresentation = self.bucketMap[index].copy()
# Choose the bit we will replace in this representation. We need to shift
# this bit deterministically. If this is always chosen randomly then there
# is a 1 in w chance of the same bit being replaced in neighboring
# representations, which is fairly high
ri = newIndex % self.w
# Now we choose a bit such that the overlap rules are satisfied.
newBit = self.random.getUInt32(self.n)
newRepresentation[ri] = newBit
while newBit in self.bucketMap[index] or \
not self._newRepresentationOK(newRepresentation, newIndex):
self.numTries += 1
newBit = self.random.getUInt32(self.n)
newRepresentation[ri] = newBit
return newRepresentation | 0.005359 |
def delete_port_binding(self, port, host):
"""Enqueue port binding delete"""
if not self.get_instance_type(port):
return
for pb_key in self._get_binding_keys(port, host):
pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE,
a_const.DELETE)
self.provision_queue.put(pb_res) | 0.005348 |
def connect_engine(self):
"""
Establish a connection to the database.
Provides simple error handling for fatal errors.
Returns:
True, if we could establish a connection, else False.
"""
try:
self.connection = self.engine.connect()
return True
except sa.exc.OperationalError as opex:
LOG.fatal("Could not connect to the database. The error was: '%s'",
str(opex))
return False | 0.003922 |
def __branch_point_dfs_recursive(u, large_n, b, stem, dfs_data):
"""A recursive implementation of the BranchPtDFS function, as defined on page 14 of the paper."""
first_vertex = dfs_data['adj'][u][0]
large_w = wt(u, first_vertex, dfs_data)
if large_w % 2 == 0:
large_w += 1
v_I = 0
v_II = 0
for v in [v for v in dfs_data['adj'][u] if wt(u, v, dfs_data) <= large_w]:
stem[u] = v # not in the original paper, but a logical extension based on page 13
if a(v, dfs_data) == u: # uv is a tree edge
large_n[v] = 0
if wt(u, v, dfs_data) % 2 == 0:
v_I = v
else:
b_u = b[u]
l2_v = L2(v, dfs_data)
#if l2_v > b_u:
# If this is true, then we're not on a branch at all
# continue
if l2_v < b_u:
large_n[v] = 1
elif b_u != 1:
#print stem
#print dfs_data['lowpoint_2_lookup']
#print b
xnode = stem[l2_v]
if large_n[xnode] != 0:
large_n[v] = large_n[xnode] + 1
elif dfs_data['graph'].adjacent(u, L1(v, dfs_data)):
large_n[v] = 2
else:
large_n[v] = large_n[u]
if large_n[v] % 2 == 0:
v_II = v
break # Goto 1
if v_II != 0:
# Move v_II to head of Adj[u]
dfs_data['adj'][u].remove(v_II)
dfs_data['adj'][u].insert(0, v_II)
elif v_I != 0:
# Move v_I to head of Adj[u]
dfs_data['adj'][u].remove(v_I)
dfs_data['adj'][u].insert(0, v_I)
first_time = True
for v in dfs_data['adj'][u]:
if a(v, dfs_data) == u:
b[v] = u
if first_time:
b[v] = b[u]
elif wt(u, v, dfs_data) % 2 == 0:
large_n[v] = 0
else:
large_n[v] = 1
stem[u] = v
__branch_point_dfs_recursive(v, large_n, b, stem, dfs_data)
first_time = False
return | 0.005 |
def get_help(obj, env, subcmds):
"""Interpolate complete help doc of given object
Assumption that given object as a specific interface:
obj.__doc__ is the basic help object.
obj.get_actions_titles() returns the subcommand if any.
"""
doc = txt.dedent(obj.__doc__ or "")
env = env.copy() ## get a local copy
doc = doc.strip()
if not re.search(r"^usage:\s*$", doc, flags=re.IGNORECASE | re.MULTILINE):
doc += txt.dedent("""
Usage:
%(std_usage)s
Options:
%(std_options)s""")
help_line = (" %%-%ds %%s"
% (max([5] + [len(a) for a in subcmds]), ))
env["actions"] = "\n".join(
help_line % (
name,
get_help(subcmd, subcmd_env(env, name), {}).split("\n")[0])
for name, subcmd in subcmds.items())
env["actions_help"] = "" if not env["actions"] else (
"ACTION could be one of:\n\n"
"%(actions)s\n\n"
"See '%(surcmd)s help ACTION' for more information "
"on a specific command."
% env)
if "%(std_usage)s" in doc:
env["std_usage"] = txt.indent(
("%(surcmd)s --help\n"
"%(surcmd)s --version" +
(("\n%(surcmd)s help [COMMAND]"
"\n%(surcmd)s ACTION [ARGS...]") if subcmds else ""))
% env,
_find_prefix(doc, "%(std_usage)s"),
first="")
if "%(std_options)s" in doc:
env["std_options"] = txt.indent(
"--help Show this screen.\n"
"--version Show version.",
_find_prefix(doc, "%(std_options)s"),
first="")
if subcmds and "%(actions_help)s" not in doc:
doc += "\n\n%(actions_help)s"
try:
output = doc % env
except KeyError as e:
msg.err("Doc interpolation of %s needed missing key %r"
% (aformat(env["surcmd"], attrs=["bold", ]),
e.args[0]))
exit(1)
except Exception as e:
msg.err(
"Documentation of %s is not valid. Please check it:\n%s"
% (aformat(env["surcmd"], attrs=["bold", ]),
doc))
exit(1)
return output | 0.000899 |
def to_transfac(self):
"""Return motif formatted in TRANSFAC format
Returns
-------
m : str
String of motif in TRANSFAC format.
"""
m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown")
for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):
m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons)
m += "XX"
return m | 0.009029 |
def delete_asset_content(self, asset_content_id=None):
"""Deletes content from an ``Asset``.
arg: asset_content_id (osid.id.Id): the ``Id`` of the
``AssetContent``
raise: NotFound - ``asset_content_id`` is not found
raise: NullArgument - ``asset_content_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
asset_content = self._get_asset_content(asset_content_id)
if asset_content.has_url() and 'amazonaws.com' in asset_content.get_url():
# print "Still have to implement removing files from aws"
key = asset_content.get_url().split('amazonaws.com')[1]
remove_file(self._config_map, key)
self._provider_session.delete_asset_content(asset_content_id)
else:
self._provider_session.delete_asset_content(asset_content_id) | 0.002944 |
def init_layout(self):
""" Create the widget in the layout pass after the child widget has
        been created and initialized. We do this so the child widget does not
attempt to use this proxy widget as its parent and because
repositioning must be done after the widget is set.
"""
self.widget = QGraphicsProxyWidget(self.parent_widget())
widget = self.widget
for item in self.child_widgets():
widget.setWidget(item)
break
super(QtGraphicsWidget, self).init_widget()
super(QtGraphicsWidget, self).init_layout() | 0.003295 |
def track_execution(cmd, project, experiment, **kwargs):
"""Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
"""
runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)
yield runner
runner.commit() | 0.001361 |
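A self-contained sketch of the same guard pattern, assuming track_execution is wrapped with contextlib.contextmanager (the decorator is not visible in the excerpt); the RunInfo stand-in below is invented for illustration.

from contextlib import contextmanager

class FakeRunInfo(object):
    # Minimal stand-in for RunInfo: carries the command and commits on exit.
    def __init__(self, cmd):
        self.cmd = cmd
    def commit(self):
        print("committing run of %r" % self.cmd)

@contextmanager
def track_execution_sketch(cmd):
    runner = FakeRunInfo(cmd)
    yield runner     # the caller can inspect or modify the run inside the block
    runner.commit()  # the transaction is committed when the block exits

with track_execution_sketch("echo hello") as run:
    run.cmd += " world"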
def urlopen(link):
    """Open the link with urllib2.urlopen and return the response object.
    Returns None on URL errors and an empty string on invalid links.
    """
try:
return urllib2.urlopen(link)
except urllib2.URLError:
pass
except ValueError:
return ""
except KeyboardInterrupt:
print("")
raise SystemExit() | 0.003846 |
def open(self):
"""Open the device."""
self._serial.port = self._port
self._serial.baudrate = self._baud
self._serial.timeout = self._timeout
self._serial.open()
self._serial.flushInput()
self._serial.flushOutput() | 0.007407 |
def NewFromJSON(data):
"""
Create a new SharedFile instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a SharedFile.
Returns:
A SharedFile instance.
"""
return SharedFile(
sharekey=data.get('sharekey', None),
name=data.get('name', None),
user=User.NewFromJSON(data.get('user', None)),
title=data.get('title', None),
description=data.get('description', None),
posted_at=data.get('posted_at', None),
permalink=data.get('permalink', None),
width=data.get('width', None),
height=data.get('height', None),
views=data.get('views', 0),
likes=data.get('likes', 0),
saves=data.get('saves', 0),
comments=data.get('comments', None),
nsfw=data.get('nsfw', False),
image_url=data.get('image_url', None),
source_url=data.get('source_url', None),
saved=data.get('saved', False),
liked=data.get('liked', False),
) | 0.001789 |
def parse_args(arguments, wrapper_kwargs={}):
"""
MMI Runner
"""
# make a socket that replies to message with the grid
# if we are running mpi we want to know the rank
args = {}
positional = [
'engine',
'configfile',
]
for key in positional:
args[key] = arguments['<' + key + '>']
# integer if not 'random'
port = arguments['--port']
args['port'] = port if port == 'random' else int(port)
# integer, default 1
interval = arguments['--interval']
args['interval'] = int(interval) if interval else 1
# boolean
args['pause'] = bool(arguments['--pause'])
args['bmi_class'] = arguments['--bmi-class']
args['output_vars'] = arguments['-o']
args['tracker'] = arguments['--track']
return args | 0.001256 |
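A runnable sketch with a hand-built docopt-style arguments dict; the values are made up, only the key names follow the lookups in parse_args above.

arguments = {
    '<engine>': 'dflowfm',        # hypothetical engine name
    '<configfile>': 'model.mdu',  # hypothetical config file
    '--port': 'random',
    '--interval': '5',
    '--pause': False,
    '--bmi-class': 'bmi.wrapper.BMIWrapper',
    '-o': ['s1', 'ucx'],
    '--track': None,
}
print(parse_args(arguments))
# {'engine': 'dflowfm', 'configfile': 'model.mdu', 'port': 'random',
#  'interval': 5, 'pause': False, 'bmi_class': 'bmi.wrapper.BMIWrapper',
#  'output_vars': ['s1', 'ucx'], 'tracker': None}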
def dailymotion_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
"""Downloads Dailymotion videos by URL.
"""
html = get_content(rebuilt_url(url))
info = json.loads(match1(html, r'qualities":({.+?}),"'))
title = match1(html, r'"video_title"\s*:\s*"([^"]+)"') or \
match1(html, r'"title"\s*:\s*"([^"]+)"')
title = unicodize(title)
for quality in ['1080','720','480','380','240','144','auto']:
try:
real_url = info[quality][1]["url"]
if real_url:
break
except KeyError:
pass
mime, ext, size = url_info(real_url)
print_info(site_info, title, mime, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir=output_dir, merge=merge) | 0.0125 |
def login(self, username=None, password=None,
section='default'):
"""
        Create the passport with ``username`` and ``password`` and log in.
If either ``username`` or ``password`` is None or omitted, the
credentials file will be parsed.
:param str username: username to login (email, phone number or user ID)
:param str password: password
:param str section: section name in the credential file
:raise: raises :class:`.AuthenticationError` if failed to login
"""
if self.has_logged_in:
return True
if username is None or password is None:
credential = conf.get_credential(section)
username = credential['username']
password = credential['password']
passport = Passport(username, password)
r = self.http.post(LOGIN_URL, passport.form)
if r.state is True:
# Bind this passport to API
self.passport = passport
passport.data = r.content['data']
self._user_id = r.content['data']['USER_ID']
return True
else:
msg = None
if 'err_name' in r.content:
if r.content['err_name'] == 'account':
msg = 'Account does not exist.'
elif r.content['err_name'] == 'passwd':
msg = 'Password is incorrect.'
raise AuthenticationError(msg) | 0.002048 |
def runtime(self):
"""Transitional property providing access to the new timer
mechanism. This will be removed in the future.
"""
warnings.warn("admm.ADMM.runtime attribute has been replaced by "
"an upgraded timer class: please see the documentation "
"for admm.ADMM.solve method and util.Timer class",
PendingDeprecationWarning)
return self.timer.elapsed('init') + self.timer.elapsed('solve') | 0.004 |
def calc_flooddischarge_v1(self):
"""Calculate the discharge during and after a flood event based on an
|anntools.SeasonalANN| describing the relationship(s) between discharge
and water stage.
Required control parameter:
|WaterLevel2FloodDischarge|
Required derived parameter:
|dam_derived.TOY|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|FloodDischarge|
Example:
The control parameter |WaterLevel2FloodDischarge| is derived from
|SeasonalParameter|. This allows to simulate different seasonal
dam control schemes. To show that the seasonal selection mechanism
is implemented properly, we define a short simulation period of
three days:
>>> from hydpy import pub
>>> pub.timegrids = '2001.01.01', '2001.01.04', '1d'
Now we prepare a dam model and define two different relationships
between water level and flood discharge. The first relatively
simple relationship (for January, 2) is based on two neurons
contained in a single hidden layer and is used in the following
example. The second neural network (for January, 3) is not
applied at all, which is why we do not need to assign any parameter
values to it:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> waterlevel2flooddischarge(
... _01_02_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1,
... weights_input=[[50., 4]],
... weights_output=[[2.], [30]],
... intercepts_hidden=[[-13000, -1046]],
... intercepts_output=[0.]),
... _01_03_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1))
>>> derived.toy.update()
>>> model.idx_sim = pub.timegrids.sim['2001.01.02']
The following example shows two distinct effects of both neurons
in the first network. One neuron describes a relatively sharp
        increase between 259.8 and 260.2 meters from about 0 to 2 m³/s.
        This could describe a release of water through a bottom outlet
        controlled by a valve.  The other neuron adds something like an exponential
increase between 260 and 261 meters, which could describe the
uncontrolled flow over a spillway:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_flooddischarge_v1,
... last_example=21,
... parseqs=(aides.waterlevel,
... fluxes.flooddischarge))
>>> test.nexts.waterlevel = numpy.arange(257, 261.1, 0.2)
>>> test()
| ex. | waterlevel | flooddischarge |
-------------------------------------
| 1 | 257.0 | 0.0 |
| 2 | 257.2 | 0.000001 |
| 3 | 257.4 | 0.000002 |
| 4 | 257.6 | 0.000005 |
| 5 | 257.8 | 0.000011 |
| 6 | 258.0 | 0.000025 |
| 7 | 258.2 | 0.000056 |
| 8 | 258.4 | 0.000124 |
| 9 | 258.6 | 0.000275 |
| 10 | 258.8 | 0.000612 |
| 11 | 259.0 | 0.001362 |
| 12 | 259.2 | 0.003031 |
| 13 | 259.4 | 0.006745 |
| 14 | 259.6 | 0.015006 |
| 15 | 259.8 | 0.033467 |
| 16 | 260.0 | 1.074179 |
| 17 | 260.2 | 2.164498 |
| 18 | 260.4 | 2.363853 |
| 19 | 260.6 | 2.79791 |
| 20 | 260.8 | 3.719725 |
| 21 | 261.0 | 5.576088 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
aid = self.sequences.aides.fastaccess
con.waterlevel2flooddischarge.inputs[0] = aid.waterlevel
con.waterlevel2flooddischarge.process_actual_input(der.toy[self.idx_sim])
flu.flooddischarge = con.waterlevel2flooddischarge.outputs[0] | 0.000232 |
def under_attack(col, queens):
"""Checks if queen is under attack
:param col: Column number
:param queens: list of queens
:return: True iff queen is under attack
"""
left = right = col
for _, column in reversed(queens):
left, right = left - 1, right + 1
if column in (left, col, right):
return True
return False | 0.004808 |
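A runnable sketch: queens holds the (row, column) placements made so far, most recent last, and col is the column being tried on the next row.

placed = [(0, 1), (1, 3)]       # queens at (row 0, col 1) and (row 1, col 3)
print(under_attack(2, placed))  # True: the queen in column 3 attacks column 2 diagonally
print(under_attack(5, placed))  # False: column 5 is safe for the next row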
def record(*a, **kw):
"""
Are you tired of typing class declarations that look like this::
class StuffInfo:
def __init__(self, a=None, b=None, c=None, d=None, e=None,
f=None, g=None, h=None, i=None, j=None):
self.a = a
self.b = b
self.c = c
self.d = d
# ...
Epsilon can help! That's right - for a limited time only, this function
returns a class which provides a shortcut. The above can be simplified
to::
StuffInfo = record(a=None, b=None, c=None, d=None, e=None,
f=None, g=None, h=None, i=None, j=None)
if the arguments are required, rather than having defaults, it could be
even shorter::
StuffInfo = record('a b c d e f g h i j')
Put more formally: C{record} optionally takes one positional argument, a
L{str} representing attribute names as whitespace-separated identifiers; it
also takes an arbitrary number of keyword arguments, which map attribute
names to their default values. If no positional argument is provided, the
names of attributes will be inferred from the names of the defaults
instead.
"""
if len(a) == 1:
attributeNames = a[0].split()
elif len(a) == 0:
if not kw:
raise TypeError("Attempted to define a record with no attributes.")
        attributeNames = sorted(kw.keys())
else:
raise TypeError(
"record must be called with zero or one positional arguments")
# Work like Python: allow defaults specified backwards from the end
defaults = []
for attributeName in attributeNames:
default = kw.pop(attributeName, _NOT_SPECIFIED)
if defaults:
if default is _NOT_SPECIFIED:
raise TypeError(
"You must specify default values like in Python; "
"backwards from the end of the argument list, "
"with no gaps")
else:
defaults.append(default)
elif default is not _NOT_SPECIFIED:
defaults.append(default)
else:
# This space left intentionally blank.
pass
if kw:
raise TypeError("The following defaults did not apply: %r" % (kw,))
return type('Record<%s>' % (' '.join(attributeNames),),
(StructBehavior,),
dict(__names__=attributeNames,
__defaults__=defaults)) | 0.000392 |
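A hedged usage sketch; record() builds on StructBehavior from the surrounding module, so the constructor behaviour assumed here (keyword arguments mapped onto the declared attributes) is inferred rather than confirmed by the excerpt.

Point = record(x=0, y=0)        # attributes with defaults, as in the docstring
p = Point(y=5)
print(p.x, p.y)                 # presumably prints: 0 5

Pair = record('first second')   # required attributes, no defaults
q = Pair(first=1, second=2)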
def at(self, p):
"""
Returns the set of all intervals that contain p.
Completes in O(m + log n) time, where:
* n = size of the tree
* m = number of matches
:rtype: set of Interval
"""
root = self.top_node
if not root:
return set()
return root.search_point(p, set()) | 0.005525 |
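A hedged usage sketch against the intervaltree package this method appears to come from; the construction API below is assumed from that package and is not shown in the excerpt.

from intervaltree import IntervalTree

tree = IntervalTree()
tree[1:5] = 'a'    # Interval(1, 5, 'a')
tree[4:9] = 'b'    # Interval(4, 9, 'b')
print(tree.at(4))  # both intervals contain 4, since the end point is exclusive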
def add_reactions(self, reaction_list):
"""Add reactions to the model.
Reactions with identifiers identical to a reaction already in the
model are ignored.
The change is reverted upon exit when using the model as a context.
Parameters
----------
reaction_list : list
A list of `cobra.Reaction` objects
"""
def existing_filter(rxn):
if rxn.id in self.reactions:
LOGGER.warning(
"Ignoring reaction '%s' since it already exists.", rxn.id)
return False
return True
# First check whether the reactions exist in the model.
pruned = DictList(filter(existing_filter, reaction_list))
context = get_context(self)
# Add reactions. Also take care of genes and metabolites in the loop.
for reaction in pruned:
reaction._model = self
# Build a `list()` because the dict will be modified in the loop.
for metabolite in list(reaction.metabolites):
# TODO: Should we add a copy of the metabolite instead?
if metabolite not in self.metabolites:
self.add_metabolites(metabolite)
# A copy of the metabolite exists in the model, the reaction
# needs to point to the metabolite in the model.
else:
# FIXME: Modifying 'private' attributes is horrible.
stoichiometry = reaction._metabolites.pop(metabolite)
model_metabolite = self.metabolites.get_by_id(
metabolite.id)
reaction._metabolites[model_metabolite] = stoichiometry
model_metabolite._reaction.add(reaction)
if context:
context(partial(
model_metabolite._reaction.remove, reaction))
for gene in list(reaction._genes):
# If the gene is not in the model, add it
if not self.genes.has_id(gene.id):
self.genes += [gene]
gene._model = self
if context:
# Remove the gene later
context(partial(self.genes.__isub__, [gene]))
context(partial(setattr, gene, '_model', None))
# Otherwise, make the gene point to the one in the model
else:
model_gene = self.genes.get_by_id(gene.id)
if model_gene is not gene:
reaction._dissociate_gene(gene)
reaction._associate_gene(model_gene)
self.reactions += pruned
if context:
context(partial(self.reactions.__isub__, pruned))
# from cameo ...
self._populate_solver(pruned) | 0.000685 |
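A hedged usage sketch assuming the cobrapy package this method is drawn from; the toy identifiers are made up.

from cobra import Model, Reaction, Metabolite

model = Model('toy_model')
rxn = Reaction('R1')
rxn.add_metabolites({Metabolite('A_c'): -1.0, Metabolite('B_c'): 1.0})
model.add_reactions([rxn])   # the metabolites are registered with the model as well
print(len(model.reactions), len(model.metabolites))  # 1 2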
def add(self, variable, range_):
"""
Add a new low and high bound for a variable.
As it is flow insensitive, it compares it with old values and update it
if needed.
"""
if variable not in self.result:
self.result[variable] = range_
else:
self.result[variable] = self.result[variable].union(range_)
return self.result[variable] | 0.004819 |
def get_noalt_contigs(data):
"""Retrieve contigs without alternatives as defined in bwa *.alts files.
If no alt files present (when we're not aligning with bwa), work around
with standard set of alts based on hg38 -- anything with HLA, _alt or
_decoy in the name.
"""
alts = set([])
alt_files = [f for f in tz.get_in(["reference", "bwa", "indexes"], data, []) if f.endswith("alt")]
if alt_files:
for alt_file in alt_files:
with open(alt_file) as in_handle:
for line in in_handle:
if not line.startswith("@"):
alts.add(line.split()[0].strip())
else:
for contig in ref.file_contigs(dd.get_ref_file(data)):
if ("_alt" in contig.name or "_decoy" in contig.name or
contig.name.startswith("HLA-") or ":" in contig.name):
alts.add(contig.name)
return [c for c in ref.file_contigs(dd.get_ref_file(data)) if c.name not in alts] | 0.004028 |
def check_appt(self, complex: str, house: str, appt: str) -> bool:
"""
        Check if the given apartment exists in the rumetr database
"""
self.check_house(complex, house)
if '%s__%s__%s' % (complex, house, appt) in self._checked_appts:
return True
try:
self.get('developers/{developer}/complexes/{complex}/houses/{house}/appts/{appt}'.format(
developer=self.developer,
complex=complex,
house=house,
appt=appt,
))
except exceptions.Rumetr404Exception:
            raise exceptions.RumetrApptNotFound('Unknown appt (house is known) - maybe you should create one?')
self._checked_appts.add('%s__%s__%s' % (complex, house, appt))
return True | 0.004969 |
def register_types(name, *types):
"""
Register a short name for one or more content types.
"""
type_names.setdefault(name, set())
for t in types:
# Redirecting the type
if t in media_types:
type_names[media_types[t]].discard(t)
# Save the mapping
media_types[t] = name
type_names[name].add(t) | 0.002732 |
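A runnable sketch given the module-level registries the function relies on; the registry names come from the code above, their empty initial contents are an assumption.

type_names = {}    # short name -> set of content types
media_types = {}   # content type -> short name

register_types('json', 'application/json', 'text/json')
register_types('yaml', 'application/x-yaml')

print(media_types['text/json'])    # 'json'
print(sorted(type_names['json']))  # ['application/json', 'text/json']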
def indicator_associations_types(
self, indicator_type, api_entity=None, api_branch=None, params=None
):
"""
Gets the indicator association from a Indicator/Group/Victim
Args:
            indicator_type: The indicator type of the association target.
            api_entity: The api entity (optional).
            api_branch: The api branch (optional).
            params: Optional dict of query parameters.
        Yields:
            The association types between this object and the target indicator type.
"""
if params is None:
params = {}
if not self.can_update():
self._tcex.handle_error(910, [self.type])
target = self._tcex.ti.indicator(indicator_type)
for at in self.tc_requests.indicator_associations_types(
self.api_type,
self.api_sub_type,
self.unique_id,
target,
api_entity=api_entity,
api_branch=api_branch,
owner=self.owner,
params=params,
):
yield at | 0.003464 |
def clear_zone_conditions(self):
"""stub"""
if (self.get_zone_conditions_metadata().is_read_only() or
self.get_zone_conditions_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['zoneConditions'] = \
self._zone_conditions_metadata['default_object_values'][0] | 0.005747 |
def _CopyDateTimeFromStringISO8601(self, time_string):
"""Copies a date and time from an ISO 8601 date and time string.
Args:
      time_string (str): date and time value formatted as:
          YYYY-MM-DDThh:mm:ss.######[+-]##:##
          Where # are numeric digits ranging from 0 to 9 and the seconds
          fraction can be either 3 or 6 digits. The time of day, fraction of
          second and time zone offset are optional.
    Returns:
      dict[str, int]: date and time values, such as year, month, day of month,
          hours, minutes, seconds and microseconds.
Raises:
ValueError: if the time string is invalid or not supported.
"""
if not time_string:
raise ValueError('Invalid time string.')
time_string_length = len(time_string)
year, month, day_of_month = self._CopyDateFromString(time_string)
if time_string_length <= 10:
return {
'year': year,
'month': month,
'day_of_month': day_of_month}
# If a time of day is specified the time string it should at least
# contain 'YYYY-MM-DDThh'.
if time_string[10] != 'T':
      raise ValueError(
          'Invalid time string - missing date and time separator.')
hours, minutes, seconds, microseconds, time_zone_offset = (
self._CopyTimeFromStringISO8601(time_string[11:]))
if time_zone_offset:
year, month, day_of_month, hours, minutes = self._AdjustForTimeZoneOffset(
year, month, day_of_month, hours, minutes, time_zone_offset)
date_time_values = {
'year': year,
'month': month,
'day_of_month': day_of_month,
'hours': hours,
'minutes': minutes,
'seconds': seconds}
if microseconds is not None:
date_time_values['microseconds'] = microseconds
return date_time_values | 0.003917 |
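A hedged sketch of the expected result shape; self would be an instance of the surrounding date-time interface class, which is not constructed here.

values = self._CopyDateTimeFromStringISO8601('2020-07-12T15:02:30.327124')
# values == {'year': 2020, 'month': 7, 'day_of_month': 12, 'hours': 15,
#            'minutes': 2, 'seconds': 30, 'microseconds': 327124}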
def _HandleMetadataUpdate(
self, metadata_key='', recursive=True, wait=True, timeout=None,
retry=True):
"""Wait for a successful metadata response.
Args:
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
wait: bool, True if we should wait for a metadata change.
timeout: int, timeout in seconds for returning metadata output.
retry: bool, True if we should retry on failure.
Returns:
json, the deserialized contents of the metadata server.
"""
exception = None
while True:
try:
return self._GetMetadataUpdate(
metadata_key=metadata_key, recursive=recursive, wait=wait,
timeout=timeout)
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
if not isinstance(e, type(exception)):
exception = e
self.logger.error('GET request error retrieving metadata. %s.', e)
if retry:
continue
else:
break | 0.007428 |
def Reset(self):
"""Reset the lexer to process a new data feed."""
# The first state
self.state = "INITIAL"
self.state_stack = []
# The buffer we are parsing now
self.buffer = ""
self.error = 0
self.verbose = 0
# The index into the buffer where we are currently pointing
self.processed = 0
self.processed_buffer = "" | 0.002762 |
def object_emitter(target, source, env, parent_emitter):
"""Sets up the PCH dependencies for an object file."""
validate_vars(env)
parent_emitter(target, source, env)
# Add a dependency, but only if the target (e.g. 'Source1.obj')
# doesn't correspond to the pre-compiled header ('Source1.pch').
# If the basenames match, then this was most likely caused by
# someone adding the source file to both the env.PCH() and the
# env.Program() calls, and adding the explicit dependency would
# cause a cycle on the .pch file itself.
#
# See issue #2505 for a discussion of what to do if it turns
# out this assumption causes trouble in the wild:
# http://scons.tigris.org/issues/show_bug.cgi?id=2505
if 'PCH' in env:
pch = env['PCH']
if str(target[0]) != SCons.Util.splitext(str(pch))[0] + '.obj':
env.Depends(target, pch)
return (target, source) | 0.001074 |
def download(fname, input_dir, dl_dir=None):
"""Download the resource from the storage."""
try:
manager = _get_storage_manager(fname)
except ValueError:
return fname
return manager.download(fname, input_dir, dl_dir) | 0.004049 |
def log_calls(func):
'''Decorator to log function calls.'''
def wrapper(*args, **kargs):
callStr = "%s(%s)" % (func.__name__, ", ".join([repr(p) for p in args] + ["%s=%s" % (k, repr(v)) for (k, v) in list(kargs.items())]))
debug(">> %s", callStr)
ret = func(*args, **kargs)
debug("<< %s: %s", callStr, repr(ret))
return ret
return wrapper | 0.013736 |
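A runnable sketch; debug() inside the decorator is a module-level helper not shown in the excerpt, so it is stubbed with logging.debug here.

import logging

logging.basicConfig(level=logging.DEBUG)
debug = logging.debug   # stand-in for the module's debug() helper

@log_calls
def add(a, b=0):
    return a + b

add(2, b=3)   # logs ">> add(2, b=3)" and then "<< add(2, b=3): 5"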
def put_worker(func, from_idx, to_idx, params, out_q):
    """
    Run ``func`` over the given index range and put the resulting
    success/failure counts on ``out_q``.
    """
succ, fail = func(from_idx, to_idx, params)
return out_q.put({'succ': succ, 'fail': fail}) | 0.005435 |
def exec_command(
client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
"""
Run provided command via exec API in provided container.
This is just a wrapper for PseudoTerminal(client, container).exec_command()
"""
exec_id = exec_create(client, container, command, interactive=interactive)
operation = ExecOperation(client, exec_id,
interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
PseudoTerminal(client, operation).start() | 0.005566 |
def neighborhood_cortical_magnification(mesh, coordinates):
'''
neighborhood_cortical_magnification(mesh, visual_coordinates) yields a list of neighborhood-
based cortical magnification values for the vertices in the given mesh if their visual field
coordinates are given by the visual_coordinates matrix (must be like [x_values, y_values]). If
either x-value or y-value of a coordinate is either None or numpy.nan, then that cortical
magnification value is numpy.nan.
'''
idcs = _cmag_coord_idcs(coordinates)
neis = mesh.tess.indexed_neighborhoods
coords_vis = np.asarray(coordinates if len(coordinates) == 2 else coordinates.T)
coords_srf = mesh.coordinates
    # One (radial, tangential, areal) magnification triple per vertex; NaN by default.
    res = np.full((mesh.vertex_count, 3), np.nan, dtype=np.float)
for idx in idcs:
nei = neis[idx]
pts_vis = coords_vis[:,nei]
pts_srf = coords_srf[:,nei]
x0_vis = coords_vis[:,idx]
x0_srf = coords_srf[:,idx]
if any(u is None or np.isnan(u) for pt in pts_vis for u in pt): continue
# find tangential, radial, and areal magnifications
x0col_vis = np.asarray([x0_vis]).T
x0col_srf = np.asarray([x0_srf]).T
# areal is easy
voronoi_vis = (pts_vis - x0col_vis) * 0.5 + x0col_vis
voronoi_srf = (pts_srf - x0col_srf) * 0.5 + x0col_srf
area_vis = np.sum([geo.triangle_area(x0_vis, a, b)
for (a,b) in zip(voronoi_vis.T, np.roll(voronoi_vis, 1, axis=1).T)])
area_srf = np.sum([geo.triangle_area(x0_srf, a, b)
for (a,b) in zip(voronoi_srf.T, np.roll(voronoi_srf, 1, axis=1).T)])
res[idx,2] = np.inf if np.isclose(area_vis, 0) else area_srf/area_vis
        # radial and tangential we do together because they are very similar:
# find the intersection lines then add up their distances along the cortex
pts_vis = voronoi_vis
pts_srf = voronoi_srf
segs_srf = (pts_srf, np.roll(pts_srf, -1, axis=1))
segs_vis = (pts_vis, np.roll(pts_vis, -1, axis=1))
segs_vis_t = np.transpose(segs_vis, (2,0,1))
segs_srf_t = np.transpose(segs_srf, (2,0,1))
x0norm_vis = npla.norm(x0_vis)
if not np.isclose(x0norm_vis, 0):
dirvecs = x0_vis / x0norm_vis
dirvecs = np.asarray([dirvecs, [-dirvecs[1], dirvecs[0]]])
for dirno in [0,1]:
dirvec = dirvecs[dirno]
line = (x0_vis, x0_vis + dirvec)
try:
isects_vis = np.asarray(geo.line_segment_intersection_2D(line, segs_vis))
# okay, these will all be nan but two of them; they are the points we care about
isect_idcs = np.unique(np.where(np.logical_not(np.isnan(isects_vis)))[1])
except Exception:
isect_idcs = []
if len(isect_idcs) != 2:
res[idx,dirno] = np.nan
continue
isects_vis = isects_vis[:,isect_idcs].T
# we need the distance in visual space
len_vis = npla.norm(isects_vis[0] - isects_vis[1])
if np.isclose(len_vis, 0): res[idx,dirno] = np.inf
else:
# we also need the distances on the surface: find the points by projection
fsegs_srf = segs_srf_t[isect_idcs]
fsegs_vis = segs_vis_t[isect_idcs]
s02lens_vis = npla.norm(fsegs_vis[:,0] - fsegs_vis[:,1], axis=1)
s01lens_vis = npla.norm(fsegs_vis[:,0] - isects_vis, axis=1)
vecs_srf = fsegs_srf[:,1] - fsegs_srf[:,0]
s02lens_srf = npla.norm(vecs_srf, axis=1)
isects_srf = np.transpose([(s01lens_vis/s02lens_vis)]) * vecs_srf \
+ fsegs_srf[:,0]
len_srf = np.sum(npla.norm(isects_srf - x0_srf, axis=1))
res[idx,dirno] = len_srf / len_vis
# That's it!
return res | 0.010794 |
def get_filename(self, renew=False):
"""Get the filename of this content.
        If the file name doesn't already exist, we create it as {id}.{format}.
"""
if self._fname is None or renew:
self._fname = '%s.%s' % (self._id, self._format)
return self._fname | 0.006601 |
def from_df(cls, path, df:DataFrame, dep_var:str, valid_idx:Collection[int], procs:OptTabTfms=None,
cat_names:OptStrList=None, cont_names:OptStrList=None, classes:Collection=None,
test_df=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False)->DataBunch:
"Create a `DataBunch` from `df` and `valid_idx` with `dep_var`. `kwargs` are passed to `DataBunch.create`."
cat_names = ifnone(cat_names, []).copy()
cont_names = ifnone(cont_names, list(set(df)-set(cat_names)-{dep_var}))
procs = listify(procs)
src = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx))
src = src.label_from_df(cols=dep_var) if classes is None else src.label_from_df(cols=dep_var, classes=classes)
if test_df is not None: src.add_test(TabularList.from_df(test_df, cat_names=cat_names, cont_names=cont_names,
processor = src.train.x.processor))
return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device,
collate_fn=collate_fn, no_check=no_check) | 0.042507 |
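A hedged usage sketch in the fastai v1 tabular style this signature belongs to; the CSV file, column names and split indices are placeholders.

from fastai.tabular import *   # fastai v1 exposes TabularDataBunch and the procs here
import pandas as pd

df = pd.read_csv('adult.csv')  # hypothetical dataset with a 'salary' column
data = TabularDataBunch.from_df(
    path='.', df=df, dep_var='salary',
    valid_idx=range(800, 1000),
    procs=[FillMissing, Categorify, Normalize],
    cat_names=['workclass', 'education'],
    bs=64,
)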
def get_processed_hotkeys(hotkeys=None):
"""
Process passed dict with key combinations or the HOTKEYS dict from
settings.
"""
hotkeys = hotkeys or ks_settings.HOTKEYS
processed_hotkeys = AutoVivification()
if not hotkeys:
return processed_hotkeys
for combination in hotkeys:
key_codes = get_key_codes(combination['keys'])
if len(key_codes) == 1:
processed_hotkeys[key_codes[0]] = get_combination_action(combination)
elif len(key_codes) == 2:
processed_hotkeys[key_codes[0]][key_codes[1]] = get_combination_action(combination)
elif len(key_codes) == 3:
processed_hotkeys[key_codes[0]][key_codes[1]][key_codes[2]] = get_combination_action(combination)
# TODO: make dynamic vivification
return processed_hotkeys | 0.004819 |
def build_parallel(parallel_mode, quiet=True, processes=4,
user_modules=None, dispatcher_options=None):
"""initializes `Parallel`
Parameters
----------
parallel_mode : str
"multiprocessing" (default), "htcondor" or "subprocess"
quiet : bool, optional
if True, progress bars will not be shown in the "multiprocessing" mode.
    processes : int, optional
The number of processes when ``parallel_mode`` is
"multiprocessing"
user_modules : list, optional
The names of modules to be sent to worker nodes when
parallel_mode is "htcondor"
dispatcher_options : dict, optional
Options to dispatcher
Returns
-------
parallel
an instance of the class `Parallel`
"""
if user_modules is None:
user_modules = [ ]
if dispatcher_options is None:
dispatcher_options = dict()
dispatchers = ('subprocess', 'htcondor')
parallel_modes = ('multiprocessing', ) + dispatchers
default_parallel_mode = 'multiprocessing'
    if parallel_mode not in parallel_modes:
logger = logging.getLogger(__name__)
logger.warning('unknown parallel_mode "{}", use default "{}"'.format(
parallel_mode, default_parallel_mode
))
parallel_mode = default_parallel_mode
if parallel_mode == 'multiprocessing':
if quiet:
atpbar.disable()
return _build_parallel_multiprocessing(processes=processes)
return _build_parallel_dropbox(
parallel_mode=parallel_mode,
user_modules=user_modules,
dispatcher_options=dispatcher_options
) | 0.001814 |
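A hedged usage sketch; the option values below are invented and only the keyword names come from the signature above.

local = build_parallel('multiprocessing', quiet=True, processes=8)
batch = build_parallel(
    'htcondor',
    user_modules=['my_analysis_tasks'],  # hypothetical module to ship to worker nodes
    dispatcher_options=dict(),
)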
def to_pascal_case(s):
"""Transform underscore separated string to pascal case
"""
return re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), s.capitalize()) | 0.011364 |
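A runnable sketch of the conversion.

print(to_pascal_case('my_variable_name'))  # MyVariableName
print(to_pascal_case('already'))           # Already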
def transitions(self, return_matrix=True):
"""Returns the routing probabilities for each vertex in the
graph.
Parameters
----------
return_matrix : bool (optional, the default is ``True``)
Specifies whether an :class:`~numpy.ndarray` is returned.
If ``False``, a dict is returned instead.
Returns
-------
out : a dict or :class:`~numpy.ndarray`
The transition probabilities for each vertex in the graph.
If ``out`` is an :class:`~numpy.ndarray`, then
``out[v, u]`` returns the probability of a transition from
vertex ``v`` to vertex ``u``. If ``out`` is a dict
then ``out_edge[v][u]`` is the probability of moving from
vertex ``v`` to the vertex ``u``.
Examples
--------
Lets change the routing probabilities:
>>> import queueing_tool as qt
>>> import networkx as nx
>>> g = nx.sedgewick_maze_graph()
>>> net = qt.QueueNetwork(g)
Below is an adjacency list for the graph ``g``.
>>> ans = qt.graph2dict(g, False)
>>> {k: sorted(v) for k, v in ans.items()}
... # doctest: +NORMALIZE_WHITESPACE
{0: [2, 5, 7],
1: [7],
2: [0, 6],
3: [4, 5],
4: [3, 5, 6, 7],
5: [0, 3, 4],
6: [2, 4],
7: [0, 1, 4]}
The default transition matrix is every out edge being equally
likely:
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 0.333..., 5: 0.333..., 7: 0.333...},
1: {7: 1.0},
2: {0: 0.5, 6: 0.5},
3: {4: 0.5, 5: 0.5},
4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},
5: {0: 0.333..., 3: 0.333..., 4: 0.333...},
6: {2: 0.5, 4: 0.5},
7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}
Now we will generate a random routing matrix:
>>> mat = qt.generate_transition_matrix(g, seed=96)
>>> net.set_transitions(mat)
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 0.112..., 5: 0.466..., 7: 0.420...},
1: {7: 1.0},
2: {0: 0.561..., 6: 0.438...},
3: {4: 0.545..., 5: 0.454...},
4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},
5: {0: 0.265..., 3: 0.460..., 4: 0.274...},
6: {2: 0.673..., 4: 0.326...},
7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}
What this shows is the following: when an :class:`.Agent` is at
vertex ``2`` they will transition to vertex ``0`` with
probability ``0.561`` and route to vertex ``6`` probability
``0.438``, when at vertex ``6`` they will transition back to
vertex ``2`` with probability ``0.673`` and route vertex ``4``
probability ``0.326``, etc.
"""
if return_matrix:
mat = np.zeros((self.nV, self.nV))
for v in self.g.nodes():
ind = [e[1] for e in sorted(self.g.out_edges(v))]
mat[v, ind] = self._route_probs[v]
else:
mat = {
k: {e[1]: p for e, p in zip(sorted(self.g.out_edges(k)), value)}
for k, value in enumerate(self._route_probs)
}
return mat | 0.000869 |