text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def set_printer(self, file_name, basename):
"""initialize DotWriter and add options for layout.
"""
layout = dict(rankdir="BT")
self.printer = DotBackend(basename, additional_param=layout)
self.file_name = file_name | 0.007843 |
def core_choice_fields(metadata_class):
""" If the 'optional' core fields (_site and _language) are required,
list them here.
"""
fields = []
if metadata_class._meta.use_sites:
fields.append('_site')
if metadata_class._meta.use_i18n:
fields.append('_language')
return fields | 0.009259 |
def transform(self, data=None):
"""
Return transformed data, or transform new data using the same model
parameters
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to transform. If no data is passed, the xform_data from
the DataGeometry object will be returned.
Returns
----------
xformed_data : list of numpy arrays
The transformed data
"""
# if no new data passed,
if data is None:
return self.xform_data
else:
formatted = format_data(
data,
semantic=self.semantic,
vectorizer=self.vectorizer,
corpus=self.corpus,
ppca=True)
norm = normalizer(formatted, normalize=self.normalize)
reduction = reducer(
norm,
reduce=self.reduce,
ndims=self.reduce['params']['n_components'])
return aligner(reduction, align=self.align) | 0.001835 |
def to_email(email_class, email, language=None, **data):
"""
Send email to specified email address
"""
if language:
email_class().send([email], language=language, **data)
else:
email_class().send([email], translation.get_language(), **data) | 0.003623 |
def _guess_normalized(self):
"""Returns true if the collated counts in `self._results` appear to be normalized.
Notes
-----
It's possible that the _results df has already been normalized, which can cause some
methods to fail. This method lets us guess whether that's true and act accordingly.
"""
return (
getattr(self, "_normalized", False)
or getattr(self, "_field", None) == "abundance"
or bool((self._results.sum(axis=1).round(4) == 1.0).all())
) | 0.009074 |
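The last clause is the heart of the check: rows whose values sum to 1.0 look like relative abundances. A minimal standalone sketch of just that clause, on a made-up DataFrame:

import pandas as pd

# Two samples whose rows each sum to 1.0 -> the data looks normalized
df = pd.DataFrame({"taxon_a": [0.2, 0.5], "taxon_b": [0.8, 0.5]})
print(bool((df.sum(axis=1).round(4) == 1.0).all()))  # True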
def _get_op_name(op, special):
"""
Find the name to attach to this method according to conventions
for special and non-special methods.
Parameters
----------
op : binary operator
special : bool
Returns
-------
op_name : str
"""
opname = op.__name__.strip('_')
if special:
opname = '__{opname}__'.format(opname=opname)
return opname | 0.002519 |
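For example, with the standard library's operator module:

import operator

print(_get_op_name(operator.add, special=False))  # 'add'
print(_get_op_name(operator.add, special=True))   # '__add__'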
def iterbyscore(self, min='-inf', max='+inf', start=None, num=None,
withscores=False, reverse=None):
""" Return a range of values from the sorted set name with scores
between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
        @max: #int maximum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@withscores: #bool indicates to return the scores along with the
members, as a list of |(member, score)| pairs
        @reverse: #bool indicating whether to sort the results in descending order
-> yields members or |(member, score)| #tuple pairs
"""
reverse = reverse if reverse is not None else self.reversed
zfunc = self._client.zrangebyscore if not reverse \
else self._client.zrevrangebyscore
_loads = self._loads
for member in zfunc(
self.key_prefix, min=min, max=max, start=start, num=num,
withscores=withscores, score_cast_func=self.cast):
if withscores:
yield (_loads(member[0]), self.cast(member[1]))
else:
yield _loads(member) | 0.002251 |
def convert2(self, imtls, sids):
"""
Convert a probability map into a composite array of shape (N,)
and dtype `imtls.dt`.
:param imtls:
DictArray instance
:param sids:
the IDs of the sites we are interested in
:returns:
an array of curves of shape (N,)
"""
assert self.shape_z == 1, self.shape_z
curves = numpy.zeros(len(sids), imtls.dt)
for imt in curves.dtype.names:
curves_by_imt = curves[imt]
for i, sid in numpy.ndenumerate(sids):
try:
pcurve = self[sid]
except KeyError:
pass # the poes will be zeros
else:
curves_by_imt[i] = pcurve.array[imtls(imt), 0]
return curves | 0.002407 |
def _cell_scalar(self, name=None):
"""
Returns the cell scalars of a vtk object
Parameters
----------
name : str
            Name of cell scalars to retrieve.
Returns
-------
scalars : np.ndarray
Numpy array of scalars
"""
if name is None:
# use active scalar array
field, name = self.active_scalar_info
if field != CELL_DATA_FIELD:
raise RuntimeError('Must specify an array to fetch.')
vtkarr = self.GetCellData().GetArray(name)
if vtkarr is None:
raise AssertionError('({}) is not a cell scalar'.format(name))
# numpy does not support bit array data types
if isinstance(vtkarr, vtk.vtkBitArray):
vtkarr = vtk_bit_array_to_char(vtkarr)
if name not in self._cell_bool_array_names:
self._cell_bool_array_names.append(name)
array = vtk_to_numpy(vtkarr)
if array.dtype == np.uint8 and name in self._cell_bool_array_names:
array = array.view(np.bool)
return array | 0.001773 |
def application_version_set(version):
"""Charm authors may trigger this command from any hook to output what
version of the application is running. This could be a package version,
for instance postgres version 9.5. It could also be a build number or
version control revision identifier, for instance git sha 6fb7ba68. """
cmd = ['application-version-set']
cmd.append(version)
try:
subprocess.check_call(cmd)
except OSError:
log("Application Version: {}".format(version)) | 0.001923 |
def assign_machine(self, machine, lock_type, token):
"""Assigns the machine object associated with this direct-type
session or informs the session that it will be a remote one
(if @a machine == @c null).
in machine of type :class:`IMachine`
in lock_type of type :class:`LockType`
in token of type :class:`IToken`
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(machine, IMachine):
raise TypeError("machine can only be an instance of type IMachine")
if not isinstance(lock_type, LockType):
raise TypeError("lock_type can only be an instance of type LockType")
if not isinstance(token, IToken):
raise TypeError("token can only be an instance of type IToken")
self._call("assignMachine",
in_p=[machine, lock_type, token]) | 0.005714 |
def index_record(self, json_path):
"""
Import `json_path` and remove it if :attr:`keep_json` is false.
"""
self.logger.debug('Indexing record: %s', json_path)
json_path = os.path.abspath(json_path)
self.check_path(json_path, '`json_path`')
with open(json_path) as fp:
try:
dct = json.load(fp)
except ValueError:
warnings.warn(
'Ignoring invalid JSON file at: {0}'.format(json_path))
return
record_type = self.get_record_type(json_path)
kwds = {}
if record_type == 'command':
importer = self.db.import_dict
kwds.update(check_duplicate=self.check_duplicate)
elif record_type == 'init':
importer = self.db.import_init_dict
elif record_type == 'exit':
importer = self.db.import_exit_dict
else:
raise ValueError("Unknown record type: {0}".format(record_type))
importer(dct, **kwds)
if not self.keep_json:
self.logger.info('Removing JSON record: %s', json_path)
os.remove(json_path) | 0.001702 |
def _fake_designspace(self, ufos):
"""Build a fake designspace with the given UFOs as sources, so that all
builder functions can rely on the presence of a designspace.
"""
designspace = designspaceLib.DesignSpaceDocument()
ufo_to_location = defaultdict(dict)
# Make weight and width axis if relevant
for info_key, axis_def in zip(
("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
(WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF),
):
axis = designspace.newAxisDescriptor()
axis.tag = axis_def.tag
axis.name = axis_def.name
mapping = []
for ufo in ufos:
user_loc = getattr(ufo.info, info_key)
if user_loc is not None:
design_loc = class_to_value(axis_def.tag, user_loc)
mapping.append((user_loc, design_loc))
ufo_to_location[ufo][axis_def.name] = design_loc
mapping = sorted(set(mapping))
if len(mapping) > 1:
axis.map = mapping
axis.minimum = min([user_loc for user_loc, _ in mapping])
axis.maximum = max([user_loc for user_loc, _ in mapping])
axis.default = min(
axis.maximum, max(axis.minimum, axis_def.default_user_loc)
)
designspace.addAxis(axis)
for ufo in ufos:
source = designspace.newSourceDescriptor()
source.font = ufo
source.familyName = ufo.info.familyName
source.styleName = ufo.info.styleName
# source.name = '%s %s' % (source.familyName, source.styleName)
source.path = ufo.path
source.location = ufo_to_location[ufo]
designspace.addSource(source)
# UFO-level skip list lib keys are usually ignored, except when we don't have a
# Designspace file to start from. If they exist in the UFOs, promote them to a
# Designspace-level lib key. However, to avoid accidents, expect the list to
# exist in none or be the same in all UFOs.
if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos):
skip_export_glyphs = {
frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos
}
if len(skip_export_glyphs) == 1:
designspace.lib["public.skipExportGlyphs"] = sorted(
next(iter(skip_export_glyphs))
)
else:
raise ValueError(
"The `public.skipExportGlyphs` list of all UFOs must either not "
"exist or be the same in every UFO."
)
return designspace | 0.002518 |
def fetch_googl():
    """Print stock prices for Google (GOOGL) over the given date range."""
    yql = YQL('GOOGL', '2014-01-01', '2014-01-10')
    for item in yql:
        print(item.get('date'), item.get('price'))
    yql.select('GOOGL', '2014-01-01', '2014-01-10')
    for item in yql:
        print(item.get('date'), item.get('price')) | 0.003155 |
def get_enterprise_customer_user(user_id, enterprise_uuid):
"""
Return the object for EnterpriseCustomerUser.
Arguments:
user_id (str): user identifier
enterprise_uuid (UUID): Universally unique identifier for the enterprise customer.
Returns:
(EnterpriseCustomerUser): enterprise customer user record
"""
EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser') # pylint: disable=invalid-name
try:
return EnterpriseCustomerUser.objects.get( # pylint: disable=no-member
enterprise_customer__uuid=enterprise_uuid,
user_id=user_id
)
except EnterpriseCustomerUser.DoesNotExist:
return None | 0.004178 |
def dict(cls):
""" Return a dict containing all of the configuration properties
:returns: (dict) containing all configuration properties.
"""
if cls._properties is None:
cls._readStdConfigFiles()
# Make a copy so we can update any current values obtained from environment
# variables
result = dict(cls._properties)
keys = os.environ.keys()
replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),
keys)
for envKey in replaceKeys:
key = envKey[len(cls.envPropPrefix):]
key = key.replace('_', '.')
result[key] = os.environ[envKey]
return result | 0.007728 |
def live_dirs(self):
"""Yields directories that must exist for this VersionedTarget to function."""
# The only caller of this function is the workdir cleaning pipeline. It is not clear that the previous_results_dir
# should be returned for that purpose. And, by the time this is called, the contents have already been copied.
if self.has_results_dir:
yield self.results_dir
yield self.current_results_dir
if self.has_previous_results_dir:
yield self.previous_results_dir | 0.013672 |
def _dump_to_text(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
        out : list of str
            A list of strings, each containing the text
            representation of one tree.
"""
return tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='text') | 0.005435 |
def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) | 0.013514 |
def _to_mwtab(self):
"""Save :class:`~mwtab.mwtab.MWTabFile` in `mwtab` formatted string.
:return: NMR-STAR string.
:rtype: :py:class:`str`
"""
mwtab_str = io.StringIO()
self.print_file(mwtab_str)
return mwtab_str.getvalue() | 0.007117 |
def github_belspec_files(spec_dir, force: bool = False):
"""Get belspec files from Github repo
Args:
spec_dir: directory to store the BEL Specification and derived files
force: force update of BEL Specifications from Github - skipped if local files less than 1 day old
"""
if not force:
dtnow = datetime.datetime.utcnow()
delta = datetime.timedelta(1)
yesterday = dtnow - delta
for fn in glob.glob(f"{spec_dir}/bel*yaml"):
if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:
log.info("Skipping BEL Specification update - specs less than 1 day old")
return
repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications"
params = {}
github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if github_access_token:
params = {"access_token": github_access_token}
r = requests.get(repo_url, params=params)
if r.status_code == 200:
results = r.json()
for f in results:
url = f["download_url"]
fn = os.path.basename(url)
if "yaml" not in fn and "yml" in fn:
fn = fn.replace("yml", "yaml")
r = requests.get(url, params=params, allow_redirects=True)
if r.status_code == 200:
open(f"{spec_dir}/{fn}", "wb").write(r.content)
else:
sys.exit(
f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}"
)
else:
sys.exit(
f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}"
) | 0.004011 |
def _hash(self, string, hash_type):
"""Hash a string using MPQ's hash function."""
hash_types = {
'TABLE_OFFSET': 0,
'HASH_A': 1,
'HASH_B': 2,
'TABLE': 3
}
seed1 = 0x7FED7FED
seed2 = 0xEEEEEEEE
for ch in string.upper():
if not isinstance(ch, int): ch = ord(ch)
value = self.encryption_table[(hash_types[hash_type] << 8) + ch]
seed1 = (value ^ (seed1 + seed2)) & 0xFFFFFFFF
seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF
return seed1 | 0.005017 |
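The method assumes `self.encryption_table` has already been built. For context, here is a sketch of the classic MPQ crypt-table construction used by common MPQ readers (an illustration, not necessarily this project's exact code):

def prepare_encryption_table():
    """Build the 0x500-entry MPQ encryption table."""
    seed = 0x00100001
    crypt_table = {}
    for i in range(256):
        index = i
        for _ in range(5):
            seed = (seed * 125 + 3) % 0x2AAAAB
            temp1 = (seed & 0xFFFF) << 0x10
            seed = (seed * 125 + 3) % 0x2AAAAB
            temp2 = (seed & 0xFFFF)
            crypt_table[index] = temp1 | temp2
            index += 0x100
    return crypt_table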
def _init_volume_service(self, version):
"""
Method to initialize the Volume Service from the config data
Args:
version (string): Version of Boss API to use.
Returns:
None
Raises:
(KeyError): if given invalid version.
"""
volume_cfg = self._load_config_section(CONFIG_VOLUME_SECTION)
self._token_volume = volume_cfg[CONFIG_TOKEN]
proto = volume_cfg[CONFIG_PROTOCOL]
host = volume_cfg[CONFIG_HOST]
self._volume = VolumeService(host, version)
self._volume.base_protocol = proto
self._volume.set_auth(self._token_volume) | 0.003026 |
def draw_multiplet(ax, fine_state, p, hmin, w, fside='right',
label_separation=1, label_fontsize=15, fsize=10,
deltanu_fontsize=6, proportional=False, text='',
text_pos='top', magnetic_lines=False, **kwds):
r"""We draw a multiplet."""
# We determine the vertical positions, calculated from p[1] up.
hyperfine_states = make_list_of_states([fine_state], 'hyperfine')
h_list = [ei.nu - hyperfine_states[0].nu for ei in hyperfine_states]
h_list = [i/h_list[-1] for i in h_list]
h_min = min([h_list[i+1]-h_list[i] for i in range(len(h_list)-1)])
h_list = [hmin*i/h_min + p[1] for i in h_list]
if proportional:
h_list = [p[1]+i*hmin for i in range(len(hyperfine_states))]
omegaij = [(hyperfine_states[i+1].nu-hyperfine_states[i].nu)/1e6
for i in range(len(hyperfine_states)-1)]
for i in range(len(h_list)):
label = '$\mathrm{F}='+str(hyperfine_states[i].f)+'$'
if magnetic_lines:
maxf = max([eee.f for eee in hyperfine_states])
f = hyperfine_states[i].f
nm = 2*maxf+1
for mf in range(-f, f+1):
draw_state(ax, [p[0]+mf*w/nm, h_list[i]], "", w/nm*0.5,
alignment=fside, fontsize=fsize)
if fside == 'right':
ax.text(p[0]+w+label_separation, h_list[i], label,
fontsize=fsize, horizontalalignment="right",
verticalalignment="center")
elif fside == 'left':
ax.text(p[0]-w-label_separation, h_list[i], label,
fontsize=fsize, horizontalalignment="left",
verticalalignment="center")
else:
draw_state(ax, [p[0], h_list[i]], label, w,
alignment=fside, fontsize=fsize)
for i in range(len(h_list)-1):
hmid = (h_list[i+1]+h_list[i])/2.0-0.5
nu = str(omegaij[i])[:5]
if fside == 'left':
ax.text(p[0]-w/2.0, hmid, r'$'+nu+' \ \mathrm{MHz}$',
fontsize=deltanu_fontsize,
horizontalalignment=fside, verticalalignment='bottom')
else:
ax.text(p[0]+w/2.0, hmid, r'$'+nu+' \ \mathrm{MHz}$',
fontsize=deltanu_fontsize,
horizontalalignment=fside, verticalalignment='bottom')
a = label_separation
if text != '':
if text_pos == 'top':
labelx = p[0]
labely = h_list[-1]+a
ax.text(labelx, labely, '$'+text+'$',
verticalalignment='bottom',
horizontalalignment='center', fontsize=label_fontsize)
elif text_pos == 'right':
labelx = p[0]+w/2+2.0*a
if fside == 'right': labelx = labelx+a*5.0
labely = (h_list[-1]+h_list[0])/2.0
ax.text(labelx, labely, '$'+text+'$',
verticalalignment='center',
horizontalalignment='left', fontsize=label_fontsize)
elif text_pos == 'left':
labelx = p[0]-w/2-2.0*a
if fside == 'left': labelx = labelx-a*5.0
labely = (h_list[-1]+h_list[0])/2.0
ax.text(labelx, labely, '$'+text+'$',
verticalalignment='center',
horizontalalignment='right', fontsize=label_fontsize)
return [[p[0], i] for i in h_list] | 0.002266 |
def elevation(client, locations):
"""
Provides elevation data for locations provided on the surface of the
earth, including depth locations on the ocean floor (which return negative
values)
:param locations: List of latitude/longitude values from which you wish
to calculate elevation data.
:type locations: a single location, or a list of locations, where a
location is a string, dict, list, or tuple
:rtype: list of elevation data responses
"""
params = {"locations": convert.shortest_path(locations)}
return client._request("/maps/api/elevation/json", params).get("results", []) | 0.00313 |
def main(argv=None):
"""ben-tpl entry point"""
arguments = cli_common(__doc__, argv=argv)
plugin = 'benchmark' if arguments['benchmark'] else None
if arguments['-g']:
template.generate_config(plugin, arguments['<FILE>'])
else:
with open(arguments['<FILE>']) as istr:
context = json.load(istr)
kwargs = dict(no_input=True, extra_context=context)
if arguments['--output-dir']:
kwargs.update(output_dir=arguments['--output-dir'])
if arguments['--interactive']:
kwargs.update(no_input=False)
logging.info(
'generating template in directory ' + kwargs.get('output_dir', os.getcwd())
)
template.generate_template(plugin, **kwargs) | 0.002642 |
def to_det_id(self, det_id_or_det_oid):
"""Convert det ID or OID to det ID"""
try:
int(det_id_or_det_oid)
except ValueError:
return self.get_det_id(det_id_or_det_oid)
else:
return det_id_or_det_oid | 0.007547 |
def work(self, burst=False, logging_level=logging.INFO):
"""
        Spawn a greenlet to run the worker so it can be killed if it blocks while dequeueing a job
        :param burst: if it's a burst worker there is no need to spawn a greenlet
"""
        # If this is a burst worker, there is no need to spawn a greenlet
if burst:
return self._work(burst, logging_level=logging_level)
        self.gevent_worker = gevent.spawn(self._work, burst, logging_level=logging_level)
self.gevent_worker.join()
return self.gevent_worker.value | 0.005703 |
def sphinx_dir(self):
"""Returns directory with sphinx documentation, if there is such.
Returns:
Full path to sphinx documentation dir inside the archive, or None
if there is no such.
"""
# search for sphinx dir doc/ or docs/ under the first directory in
# archive (e.g. spam-1.0.0/doc)
candidate_dirs = self.archive.get_directories_re(
settings.SPHINX_DIR_RE, full_path=True)
# search for conf.py in the dirs (TODO: what if more are found?)
for directory in candidate_dirs:
contains_conf_py = self.archive.get_files_re(
r'{0}/conf.py$'.format(re.escape(directory)), full_path=True)
in_tests = 'tests' in directory.split(os.sep)
if contains_conf_py and not in_tests:
return directory | 0.002347 |
def numeric(basetype, min_=None, max_=None):
"""Validator for numeric params"""
min_ = basetype(min_) if min_ is not None else None
max_ = basetype(max_) if max_ is not None else None
def _numeric(string):
value = basetype(string)
if min_ is not None and value < min_ or max_ is not None and value > max_:
msg = "%r not in valid range %r" % (string, (min_, max_))
raise argparse.ArgumentTypeError(msg)
return value
_numeric.__name__ = basetype.__name__
return _numeric | 0.00369 |
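A possible argparse usage; the parser and option name are illustrative:

import argparse

parser = argparse.ArgumentParser()
# Accept only integer ports between 1 and 65535
parser.add_argument("--port", type=numeric(int, 1, 65535), default=8080)
args = parser.parse_args(["--port", "8000"])
print(args.port)  # 8000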
def _MakeGroupFromRootSection(root_section, undefined_str):
"""Construct a dictinary { template name -> Template() instance }
Args:
root_section: _Section instance -- root of the original parse tree
"""
group = {}
for statement in root_section.Statements():
if isinstance(statement, six.string_types):
continue
func, args = statement
# here the function acts as ID for the block type
if func is _DoDef and isinstance(args, _Section):
section = args
            # Construct a Template instance from this _Section subtree
t = Template._FromSection(section, group, undefined_str)
group[section.section_name] = t
return group | 0.001374 |
def make_diffuse_comp_info_dict(self, galkey):
""" Make a dictionary maping from merged component to information about that component
Parameters
----------
galkey : str
A short key identifying the galprop parameters
"""
galprop_rings = self.read_galprop_rings_yaml(galkey)
ring_limits = galprop_rings.get('ring_limits')
comp_dict = galprop_rings.get('diffuse_comp_dict')
remove_rings = galprop_rings.get('remove_rings', [])
diffuse_comp_info_dict = {}
nring = len(ring_limits) - 1
for source_key in sorted(comp_dict.keys()):
for iring in range(nring):
source_name = "%s_%i" % (source_key, iring)
if source_name in remove_rings:
continue
full_key = "%s_%s" % (source_name, galkey)
diffuse_comp_info_dict[full_key] =\
self.make_diffuse_comp_info(source_name, galkey)
self._diffuse_comp_info_dicts[galkey] = diffuse_comp_info_dict
return diffuse_comp_info_dict | 0.00273 |
def get_pmid_by_keyword(keyword: str,
graph: Optional[BELGraph] = None,
pubmed_identifiers: Optional[Set[str]] = None,
) -> Set[str]:
"""Get the set of PubMed identifiers beginning with the given keyword string.
:param keyword: The beginning of a PubMed identifier
:param graph: A BEL graph
:param pubmed_identifiers: A set of pre-cached PubMed identifiers
:return: A set of PubMed identifiers starting with the given string
"""
if pubmed_identifiers is not None:
return {
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier.startswith(keyword)
}
if graph is None:
raise ValueError('Graph not supplied')
return {
pubmed_identifier
for pubmed_identifier in iterate_pubmed_identifiers(graph)
if pubmed_identifier.startswith(keyword)
} | 0.003106 |
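With a pre-cached set of identifiers (values invented for illustration):

pmids = {"29118345", "29118370", "30000001"}
print(get_pmid_by_keyword("2911", pubmed_identifiers=pmids))
# {'29118345', '29118370'}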
def create_select_query(self, tokens):
"""
Parse tokens of select query
:param tokens: A list of InfluxDB query tokens
"""
if not tokens[Keyword.SELECT]:
return None
if not tokens[Keyword.FROM]:
return None
return SelectQuery(
self.parse_keyword(Keyword.SELECT, tokens),
self.parse_keyword(Keyword.FROM, tokens),
where_stmt=self.parse_keyword(Keyword.WHERE, tokens),
limit_stmt=self.parse_keyword(Keyword.LIMIT, tokens),
group_by_stmt=self.parse_group(tokens),
duration=self.parsed_time_overlap.timespan_seconds(),
resolution=self.parsed_resolution,
time_ranges=self.parsed_time,
time_overlap=self.parsed_time_overlap,
datapoints=self.parsed_datapoints
) | 0.002317 |
def parse_mode(mode, default_bitdepth=None):
"""Parse PIL-style mode and return tuple (grayscale, alpha, bitdeph)"""
# few special cases
if mode == 'P':
        # Palette mode is not supported
raise Error('Unknown colour mode:' + mode)
elif mode == '1':
# Logical
return (True, False, 1)
elif mode == 'I':
# Integer
return (True, False, 16)
# here we go
if mode.startswith('L'):
grayscale = True
mode = mode[1:]
elif mode.startswith('RGB'):
grayscale = False
mode = mode[3:]
else:
raise Error('Unknown colour mode:' + mode)
if mode.startswith('A'):
alpha = True
mode = mode[1:]
else:
alpha = False
bitdepth = default_bitdepth
if mode.startswith(';'):
mode = mode[1:]
if mode:
try:
bitdepth = int(mode)
except (TypeError, ValueError):
raise Error('Unsupported bitdepth mode:' + mode)
return (grayscale, alpha, bitdepth) | 0.000971 |
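A few illustrative calls:

print(parse_mode("RGBA;16"))                 # (False, True, 16)
print(parse_mode("LA", default_bitdepth=8))  # (True, True, 8)
print(parse_mode("RGB"))                     # (False, False, None)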
def bed(args):
"""
%prog bed pslfile
Convert to bed format.
"""
p = OptionParser(bed.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pslfile, = args
fw = must_open(opts.outfile, "w")
psl = Psl(pslfile)
for p in psl:
print(p.bed12line, file=fw) | 0.00271 |
def multi_shift(df, window):
"""
get last N rows RELATIVE to another row in pandas
http://stackoverflow.com/questions/25724056/how-to-get-last-n-rows-relative-to-another-row-in-pandas-vector-solution
"""
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
dfs = [df.shift(i) for i in np.arange(window)]
for ix, df_item in enumerate(dfs[1:]):
dfs[ix + 1].columns = [str(col) for col in df_item.columns + str(ix + 1)]
    return pd.concat(dfs, axis=1, sort=True) | 0.004 |
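A small illustration; the shifted copies get the numeric suffixes produced by the renaming above:

import pandas as pd

s = pd.Series([10, 20, 30, 40], name="x")
# Columns 'x', 'x1', 'x2': each row holds the current value and the two previous ones
print(multi_shift(s, window=3))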
def pop_item(self):
"""T.pop_item() -> (k, v), remove and return some (key, value) pair as a
2-tuple; but raise KeyError if T is empty.
"""
if self.is_empty():
raise KeyError("pop_item(): tree is empty")
node = self._root
while True:
if node.left is not None:
node = node.left
elif node.right is not None:
node = node.right
else:
break
key = node.key
value = node.value
self.remove(key)
return key, value | 0.005172 |
def _is_authenticated(self):
"""Checks if credentials allow for authenticated carto access"""
if not self.auth_api_client.is_valid_api_key():
raise CartoException(
'Cannot authenticate user `{}`. Check credentials.'.format(
self.creds.username())) | 0.006431 |
def gev_expval(xi, mu=0, sigma=1):
"""
Expected value of generalized extreme value distribution.
"""
return mu - (sigma / xi) + (sigma / xi) * flib.gamfun(1 - xi) | 0.005618 |
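For xi < 1 and xi != 0 this is the standard GEV mean, mu + sigma*(Gamma(1 - xi) - 1)/xi. A sketch of the same computation with the stdlib gamma function substituted for `flib.gamfun`:

from math import gamma

def gev_expval_ref(xi, mu=0.0, sigma=1.0):
    # E[X] = mu + sigma * (gamma(1 - xi) - 1) / xi
    return mu + sigma * (gamma(1.0 - xi) - 1.0) / xi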
def load_handfile(path, importer=None):
"""
    Import the handfile at the given path and return (docstring, callables),
    i.e. the ``__doc__`` attribute (a string) of the handfile module and a
    ``{'name': callable}`` dict of all callables that pass mohand's command test.
    :param str path: path of the handfile to import
    :param function importer: optional import function, defaults to ``__import__``
    :return: module docstring, and a dict of terminal-command callables only
:rtype: (str, dict(str, object))
"""
if importer is None:
importer = __import__
    # Split the path into directory and file name
directory, handfile = os.path.split(path)
    # If the directory is not on ``PYTHONPATH``, add it so our import works
added_to_path = False
index = None
if directory not in sys.path:
sys.path.insert(0, directory)
added_to_path = True
    # If the directory is already on ``PYTHONPATH``, temporarily move it to the
    # front; otherwise another ``handfile`` might be imported instead of ours
else:
i = sys.path.index(directory)
if i != 0:
            # Save the index so it can be restored later
index = i
            # Insert at the front, then remove the original entry
sys.path.insert(0, directory)
del sys.path[i + 1]
    # Do the import (stripping the .py extension)
sys_byte_code_bak = sys.dont_write_bytecode
sys.dont_write_bytecode = True
imported = importer(os.path.splitext(handfile)[0])
sys.dont_write_bytecode = sys_byte_code_bak
    # Remove the path we added from ``PYTHONPATH``
    # (purely for tidiness, to avoid polluting ``PYTHONPATH``)
if added_to_path:
del sys.path[0]
        # Put the PATH entry we moved back in its original position
if index is not None:
sys.path.insert(index + 1, directory)
del sys.path[0]
    # Actually load the commands
docstring, commands = get_commands_from_module(imported)
return docstring, commands | 0.000637 |
def entrypoint_script(*lines):
'write lines to /entrypoint and hint it as default command'
lines = list(lines)
if lines and not lines[0].startswith('#!'):
lines.insert(0, '#!/bin/sh')
return eval([['entrypoint', '/entrypoint'],
['write-script', '/entrypoint'] + lines]) | 0.003226 |
def convert_timestamps(obj):
"""
Convert unix timestamps in the scraper output to python datetimes
so that they will be saved properly as Mongo datetimes.
"""
for key in ('date', 'when', 'end', 'start_date', 'end_date'):
value = obj.get(key)
if value:
try:
obj[key] = _timestamp_to_dt(value)
except TypeError:
raise TypeError("expected float for %s, got %s" % (key, value))
for key in ('sources', 'actions', 'votes', 'roles'):
for child in obj.get(key, []):
convert_timestamps(child)
return obj | 0.001623 |
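`_timestamp_to_dt` is defined elsewhere in the module; a plausible stand-in plus a usage sketch with invented field values:

import datetime

def _timestamp_to_dt(timestamp):
    # Hypothetical stand-in: interpret a unix timestamp as a UTC datetime
    return datetime.datetime.utcfromtimestamp(float(timestamp))

obj = {"date": 1388534400.0, "actions": [{"when": 1388620800.0}]}
print(convert_timestamps(obj))
# {'date': datetime.datetime(2014, 1, 1, 0, 0), 'actions': [{'when': datetime.datetime(2014, 1, 2, 0, 0)}]}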
def rollout(env, acts):
"""
Perform a rollout using a preset collection of actions
"""
total_rew = 0
env.reset()
steps = 0
for act in acts:
_obs, rew, done, _info = env.step(act)
steps += 1
total_rew += rew
if done:
break
return steps, total_rew | 0.003106 |
def decrease_reads_in_percent(
current_provisioning, percent, min_provisioned_reads, log_tag):
""" Decrease the current_provisioning with percent %
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type percent: int
:param percent: How many percent should we decrease with
:type min_provisioned_reads: int
:param min_provisioned_reads: Configured min provisioned reads
:type log_tag: str
:param log_tag: Prefix for the log
:returns: int -- New provisioning value
"""
percent = float(percent)
decrease = int(float(current_provisioning)*(float(percent)/100))
updated_provisioning = current_provisioning - decrease
min_provisioned_reads = __get_min_reads(
current_provisioning,
min_provisioned_reads,
log_tag)
if updated_provisioning < min_provisioned_reads:
logger.info(
'{0} - Reached provisioned reads min limit: {1:d}'.format(
log_tag, int(min_provisioned_reads)))
return min_provisioned_reads
logger.debug(
'{0} - Read provisioning will be decreased to {1:d} units'.format(
log_tag, int(updated_provisioning)))
return updated_provisioning | 0.0008 |
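A worked example of the core arithmetic (the configured-minimum check is omitted here):

current_provisioning = 100
percent = 25.0
decrease = int(float(current_provisioning) * (percent / 100))  # 25
updated_provisioning = current_provisioning - decrease         # 75
# If 75 were below the configured minimum, the minimum would be returned instead.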
def apply(self, collection, ops, **kwargs):
"""Apply the filter to collection."""
validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa
return [o for o in collection if validator(o)] | 0.013453 |
def setRandomCenters(self, dim, weight, seed):
"""
Set the initial centres to be random samples from
a gaussian population with constant weights.
"""
rng = random.RandomState(seed)
clusterCenters = rng.randn(self._k, dim)
clusterWeights = tile(weight, self._k)
self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
return self | 0.004866 |
def write_diversity_metrics(data, sample_ids, fp=None):
"""
Given a dictionary of diversity calculations (keyed by method)
write out the data to a file.
"""
if fp is None:
fp = "./diversity_data.txt"
with open(fp, "w") as outf:
out = csv.writer(outf, delimiter="\t")
out.writerow(["SampleID", "Group", "Calculation"])
        for group, d in data.items():
            for sid, value in d.items():
out.writerow([sid, group, value]) | 0.001988 |
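Example of the expected input shape (group -> sample ID -> value); the file name is illustrative:

shannon = {
    "Control": {"S1": 3.12, "S2": 2.98},
    "Treatment": {"S3": 2.45},
}
# sample_ids is accepted but unused by the function body
write_diversity_metrics(shannon, sample_ids=None, fp="shannon_diversity.txt")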
def _on_interface_opened(self, success, result, failure_reason, context, next_characteristic=None):
"""Callback function called when the notification related to an interface has been enabled.
It is executed in the baBLE working thread: should not be blocking.
Args:
success (bool): A bool indicating that the operation is successful or not
result (dict): Information (if successful)
failure_reason (any): An object indicating the reason why the operation is not successful (else None)
context (dict): The connection context
next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable
notification.
"""
if not success:
self.connections.finish_operation(context['connection_id'], False, failure_reason)
return
if next_characteristic is not None:
self.bable.set_notification(
enabled=True,
connection_handle=context['connection_handle'],
characteristic=next_characteristic,
on_notification_set=[self._on_interface_opened, context],
on_notification_received=self._on_notification_received,
sync=False
)
else:
self.connections.finish_operation(context['connection_id'], True, None) | 0.00636 |
def get_comment_section(self, force_reload=False, reverse=False):
"""Get CommentSection instance representing all comments for thread.
:arg force_reload=False: Whether to force reloading comments
directly or allow using what is cached
in self.content if possible.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: CommentSection representing all comments for thread.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: High-level function called by user to get comments.
"""
if self.content is not None and not force_reload:
return self.content
if self.thread_id is None:
self.thread_id = self.lookup_thread_id()
self.content = self.lookup_comments(reverse=reverse)
return self.content | 0.002191 |
def update_dns(self, new_ip):
"""Call No-IP API based on dict login_info and return the status code.
"""
headers = None
if self.auth_type == 'T':
api_call_url = self._base_url.format(hostname=self.hostname,
token=self.auth.token,
ip=new_ip)
else:
api_call_url = self._base_url.format(hostname=self.hostname,
ip=new_ip)
headers = {
'Authorization': "Basic %s" %
self.auth.base64key.decode('utf-8'),
'User-Agent': "%s/%s %s" % (__title__, __version__, __email__)
}
r = requests.get(api_call_url, headers=headers)
self.last_ddns_response = str(r.text).strip()
return r.status_code, r.text | 0.002193 |
def copy_analysis_files(cls, orig_dir, dest_dir, copyfiles):
""" Copy a list of files from orig_dir to dest_dir"""
for pattern in copyfiles:
glob_path = os.path.join(orig_dir, pattern)
files = glob.glob(glob_path)
for ff in files:
f = os.path.basename(ff)
orig_path = os.path.join(orig_dir, f)
dest_path = os.path.join(dest_dir, f)
try:
copyfile(orig_path, dest_path)
except IOError:
sys.stderr.write("WARNING: failed to copy %s\n" % orig_path) | 0.00487 |
def extend(*args):
"""shallow dictionary merge
Args:
        *args: dicts to merge; the first determines the result type,
            and later dicts override earlier keys
    Returns:
        new instance of the same type as the first dict, with all dicts merged.
"""
if not args:
return {}
first = args[0]
rest = args[1:]
out = type(first)(first)
for each in rest:
out.update(each)
return out | 0.002653 |
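For example:

defaults = {"host": "localhost", "port": 8080}
overrides = {"port": 9090, "debug": True}
print(extend(defaults, overrides))  # {'host': 'localhost', 'port': 9090, 'debug': True}
print(defaults)                     # unchanged: {'host': 'localhost', 'port': 8080}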
def guessoffset(args):
"""
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
"""
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
rec = next(ai)
if offset == 33:
print("Sanger encoding (offset=33)", file=sys.stderr)
elif offset == 64:
print("Illumina encoding (offset=64)", file=sys.stderr)
return offset | 0.001518 |
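The decision comes down to where the quality characters sit in the ASCII table; a quick illustration with invented quality strings:

sanger_quals = "##$%&++578"   # ord values 35-56, below 59 -> offset 33 (Sanger/Illumina 1.8+)
illumina_quals = "ddeeffggh"  # ord values 100-104, above 74 -> offset 64 (Illumina 1.3+/1.5+)
print([ord(c) for c in sanger_quals])
print([ord(c) for c in illumina_quals])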
def document_geom(geom):
"""
Create a structured documentation for the geom
It replaces `{usage}`, `{common_parameters}` and
`{aesthetics}` with generated documentation.
"""
# Dedented so that it lineups (in sphinx) with the part
# generated parts when put together
docstring = dedent(geom.__doc__)
# usage
signature = make_signature(geom.__name__,
geom.DEFAULT_PARAMS,
common_geom_params,
common_geom_param_values)
usage = GEOM_SIGNATURE_TPL.format(signature=signature)
# aesthetics
contents = OrderedDict(('**{}**'.format(ae), '')
for ae in sorted(geom.REQUIRED_AES))
if geom.DEFAULT_AES:
d = geom.DEFAULT_AES.copy()
d['group'] = '' # All geoms understand the group aesthetic
contents.update(sorted(d.items()))
table = dict_to_table(('Aesthetic', 'Default value'), contents)
aesthetics_table = AESTHETICS_TABLE_TPL.format(table=table)
tpl = dedent(geom._aesthetics_doc.lstrip('\n'))
aesthetics_doc = tpl.format(aesthetics_table=aesthetics_table)
aesthetics_doc = indent(aesthetics_doc, ' '*4)
# common_parameters
d = geom.DEFAULT_PARAMS
common_parameters = GEOM_PARAMS_TPL.format(
default_stat=d['stat'],
default_position=d['position'],
default_na_rm=d['na_rm'],
default_inherit_aes=d.get('inherit_aes', True),
_aesthetics_doc=aesthetics_doc,
**common_params_doc)
docstring = docstring.replace('{usage}', usage)
docstring = docstring.replace('{common_parameters}',
common_parameters)
geom.__doc__ = docstring
return geom | 0.000569 |
def admin_obj_link(obj, display=''):
"""Returns a link to the django admin change list with a filter set to
only the object given.
:param obj:
Object to create the admin change list display link for
:param display:
Text to display in the link. Defaults to string call of the object
:returns:
Text containing HTML for a link
"""
# get the url for the change list for this object
url = reverse('admin:%s_%s_changelist' % (obj._meta.app_label,
obj._meta.model_name))
url += '?id__exact=%s' % obj.id
text = str(obj)
if display:
text = display
return format_html('<a href="{}">{}</a>', url, text) | 0.002928 |
def xray_driver_removed_handler(self, unused_channel, data):
"""Handle a notification that a driver has been removed.
Args:
unused_channel: The message channel.
data: The message data.
"""
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
driver_data = gcs_entries.Entries(0)
message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(
driver_data, 0)
driver_id = message.DriverId()
logger.info("Monitor: "
"XRay Driver {} has been removed.".format(
binary_to_hex(driver_id)))
self._xray_clean_up_entries_for_driver(driver_id) | 0.00277 |
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
        cte_header.defects.append(errors.InvalidHeaderDefect(
            "Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header | 0.000898 |
def get_mac_address_table_output_mac_address_table_vlanid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_address_table = ET.Element("get_mac_address_table")
config = get_mac_address_table
output = ET.SubElement(get_mac_address_table, "output")
mac_address_table = ET.SubElement(output, "mac-address-table")
mac_address_key = ET.SubElement(mac_address_table, "mac-address")
mac_address_key.text = kwargs.pop('mac_address')
vlanid = ET.SubElement(mac_address_table, "vlanid")
vlanid.text = kwargs.pop('vlanid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.00277 |
def create_apikey_model(user_model):
""" Generate ApiKey model class and connect it with :user_model:.
    ApiKey is generated with a One-to-One relationship to the user model
    class :user_model:, with a backreference.
ApiKey is set up to be auto-generated when a new :user_model: is created.
Returns ApiKey document class. If ApiKey is already defined, it is not
generated.
Arguments:
:user_model: Class that represents user model for which api keys will
be generated and with which ApiKey will have relationship.
"""
try:
return engine.get_document_cls('ApiKey')
except ValueError:
pass
fk_kwargs = {
'ref_column': None,
}
if hasattr(user_model, '__tablename__'):
fk_kwargs['ref_column'] = '.'.join([
user_model.__tablename__, user_model.pk_field()])
fk_kwargs['ref_column_type'] = user_model.pk_field_type()
class ApiKey(engine.BaseDocument):
__tablename__ = 'nefertari_apikey'
id = engine.IdField(primary_key=True)
token = engine.StringField(default=create_apikey_token)
user = engine.Relationship(
document=user_model.__name__,
uselist=False,
backref_name='api_key',
backref_uselist=False)
user_id = engine.ForeignKeyField(
ref_document=user_model.__name__,
**fk_kwargs)
def reset_token(self):
self.update({'token': create_apikey_token()})
return self.token
# Setup ApiKey autogeneration on :user_model: creation
ApiKey.autogenerate_for(user_model, 'user')
return ApiKey | 0.000597 |
def compute(self):
"""Compute and return the signature according to the given data."""
if "Signature" in self.params:
raise RuntimeError("Existing signature in parameters")
if self.signature_version is not None:
version = self.signature_version
else:
version = self.params["SignatureVersion"]
if str(version) == "1":
bytes = self.old_signing_text()
hash_type = "sha1"
elif str(version) == "2":
bytes = self.signing_text()
if self.signature_method is not None:
signature_method = self.signature_method
else:
signature_method = self.params["SignatureMethod"]
hash_type = signature_method[len("Hmac"):].lower()
else:
raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
return self.creds.sign(bytes, hash_type) | 0.002134 |
def fetch_query_from_pgdb(self, qname, query, con, cxn, limit=None, force=False):
"""
Supply either an already established connection, or connection parameters.
The supplied connection will override any separate cxn parameter
:param qname: The name of the query to save the output to
:param query: The SQL query itself
:param con: The already-established connection
:param cxn: The postgres connection information
:param limit: If you only want a subset of rows from the query
:return:
"""
if con is None and cxn is None:
LOG.error("ERROR: you need to supply connection information")
return
if con is None and cxn is not None:
con = psycopg2.connect(
host=cxn['host'], database=cxn['database'], port=cxn['port'],
user=cxn['user'], password=cxn['password'])
outfile = '/'.join((self.rawdir, qname))
cur = con.cursor()
# wrap the query to get the count
countquery = ' '.join(("SELECT COUNT(*) FROM (", query, ") x"))
if limit is not None:
countquery = ' '.join((countquery, "LIMIT", str(limit)))
# check local copy.
# assume that if the # rows are the same, that the table is the same
# TEC - opinion:
# the only thing to assume is that if the counts are different
# is the data could not be the same.
#
# i.e: for MGI, the dbinfo table has a single row that changes
# to check if they are the same sort & compare digests. (
filerowcount = -1
tablerowcount = -1
if not force:
if os.path.exists(outfile):
# get rows in the file
filerowcount = self.file_len(outfile)
LOG.info("INFO: rows in local file: %s", filerowcount)
# get rows in the table
# tablerowcount=cur.rowcount
cur.execute(countquery)
tablerowcount = cur.fetchone()[0]
# rowcount-1 because there's a header
if force or filerowcount < 0 or (filerowcount-1) != tablerowcount:
if force:
LOG.info("Forcing download of %s", qname)
else:
LOG.info(
"%s local (%s) different from remote (%s); fetching.",
qname, filerowcount, tablerowcount)
# download the file
LOG.debug("COMMAND:%s", query)
outputquery = """
COPY ({0}) TO STDOUT WITH DELIMITER AS '\t' CSV HEADER""".format(query)
with open(outfile, 'w') as f:
cur.copy_expert(outputquery, f)
# Regenerate row count to check integrity
filerowcount = self.file_len(outfile)
if (filerowcount-1) < tablerowcount:
raise Exception(
"Download from %s failed, %s != %s", cxn['host'] + ':' +
cxn['database'], (filerowcount-1), tablerowcount)
elif (filerowcount-1) > tablerowcount:
LOG.warning(
"Fetched from %s more rows in file (%s) than reported in count(%s)",
cxn['host'] + ':'+cxn['database'], (filerowcount-1), tablerowcount)
else:
LOG.info("local data same as remote; reusing.")
return | 0.001772 |
def getFreeEnergyDifferences(self, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Get the dimensionless free energy differences and uncertainties among all thermodynamic states.
Parameters
----------
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method,
or None to use default. See help for computeAsymptoticCovarianceMatrix()
for more information on various methods. (default: svd)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude
than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
        Deltaf_ij : np.ndarray, float, shape=(K, K)
            Deltaf_ij[i,j] is the estimated free energy difference
        dDeltaf_ij : np.ndarray, float, shape=(K, K)
dDeltaf_ij[i,j] is the estimated statistical uncertainty
(one standard deviation) in Deltaf_ij[i,j]
Notes
-----
Computation of the covariance matrix may take some time for large K.
The reported statistical uncertainty should, in the asymptotic limit, reflect one standard deviation for the normal distribution of the estimate.
The true free energy difference should fall within the interval [-df, +df] centered on the estimate 68% of the time, and within
the interval [-2 df, +2 df] centered on the estimate 95% of the time.
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit.
See Section III of Reference [1].
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> [Deltaf_ij, dDeltaf_ij] = mbar.getFreeEnergyDifferences()
"""
# Compute free energy differences.
f_i = np.matrix(self.f_k)
Deltaf_ij = f_i - f_i.transpose()
# zero out numerical error for thermodynamically identical states
self._zerosamestates(Deltaf_ij)
returns = []
returns.append(np.array(Deltaf_ij))
if compute_uncertainty or return_theta:
# Compute asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(self.Log_W_nk), self.N_k, method=uncertainty_method)
if compute_uncertainty:
# compute the covariance component without doing the double loop.
# d2DeltaF = Theta_ij[i,i] + Theta_ij[j,j] - 2.0 * Theta_ij[i,j]
diag = Theta_ij.diagonal()
d2DeltaF = diag + diag.transpose() - 2 * Theta_ij
# zero out numerical error for thermodynamically identical states
self._zerosamestates(d2DeltaF)
# check for any numbers below zero.
if (np.any(d2DeltaF < 0.0)):
if(np.any(d2DeltaF) < warning_cutoff):
# Hmm. Will this print correctly?
print("A squared uncertainty is negative. d2DeltaF = %e" % d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)])
else:
d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)] = 0.0
# take the square root of the entries of the matrix
dDeltaf_ij = np.sqrt(d2DeltaF)
# Return matrix of free energy differences and uncertainties.
returns.append(np.array(dDeltaf_ij))
if (return_theta):
returns.append(np.array(Theta_ij))
return returns | 0.003037 |
def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['onContentLoad']
elif asset_type == 'page':
if self.page_id == 'unknown':
return None
return self.pageTimings['onLoad']
# TODO - should we return a slightly fake total load time to
            # accommodate HAR data that cannot understand things like JS
# rendering or just throw a warning?
#return self.get_load_time(request_type='.*',content_type='.*', status_code='.*', asynchronous=False)
else:
return self.get_load_time(
content_type=self.asset_types[asset_type]
) | 0.004357 |
def ecg_process(ecg, rsp=None, sampling_rate=1000, filter_type="FIR", filter_band="bandpass", filter_frequency=[3, 45], segmenter="hamilton", quality_model="default", hrv_features=["time", "frequency"], age=None, sex=None, position=None):
"""
Automated processing of ECG and RSP signals.
Parameters
----------
ecg : list or ndarray
ECG signal array.
rsp : list or ndarray
Respiratory (RSP) signal array.
sampling_rate : int
Sampling rate (samples/second).
filter_type : str
Can be Finite Impulse Response filter ("FIR"), Butterworth filter ("butter"), Chebyshev filters ("cheby1" and "cheby2"), Elliptic filter ("ellip") or Bessel filter ("bessel").
filter_band : str
Band type, can be Low-pass filter ("lowpass"), High-pass filter ("highpass"), Band-pass filter ("bandpass"), Band-stop filter ("bandstop").
filter_frequency : int or list
Cutoff frequencies, format depends on type of band: "lowpass" or "bandpass": single frequency (int), "bandpass" or "bandstop": pair of frequencies (list).
segmenter : str
The cardiac phase segmenter. Can be "hamilton", "gamboa", "engzee", "christov" or "ssf". See :func:`neurokit.ecg_preprocess()` for details.
quality_model : str
Path to model used to check signal quality. "default" uses the builtin model. None to skip this function.
hrv_features : list
What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'. None to skip this function.
age : float
Subject's age for adjusted HRV.
sex : str
Subject's gender ("m" or "f") for adjusted HRV.
position : str
Recording position. To compare with data from Voss et al. (2015), use "supine".
Returns
----------
processed_ecg : dict
Dict containing processed ECG features.
Contains the ECG raw signal, the filtered signal, the R peaks indexes, HRV features, all the heartbeats, the Heart Rate, the RSP filtered signal (if respiration provided) and the respiratory sinus arrhythmia (RSA).
Example
----------
>>> import neurokit as nk
>>> processed_ecg = nk.ecg_process(ecg_signal, resp_signal)
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave.The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the ventricular repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
- **RSA**: Respiratory sinus arrhythmia (RSA) is a naturally occurring variation in heart rate that occurs during the breathing cycle, serving as a measure of parasympathetic nervous system activity. See :func:`neurokit.ecg_rsa()` for details.
- **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, the HRV is influenced by many several factors like chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods. See :func:`neurokit.ecg_hrv()` for a description of indices.
- **Adjusted HRV**: The raw HRV features are normalized :math:`(raw - Mcluster) / sd` according to the participant's age and gender. In data from Voss et al. (2015), HRV analysis was performed on 5-min ECG recordings (lead II and lead V2 simultaneously, 500 Hz sample rate) obtained in supine position after a 5–10 minutes resting phase. The cohort of healthy subjects consisted of 782 women and 1124 men between the ages of 25 and 74 years, clustered into 4 groups: YF (Female, Age = [25-49], n=571), YM (Male, Age = [25-49], n=744), EF (Female, Age = [50-74], n=211) and EM (Male, Age = [50-74], n=571).
- **Systole/Diastole**: One prominent channel of body and brain communication is that conveyed by baroreceptors, pressure and stretch-sensitive receptors within the heart and surrounding arteries. Within each cardiac cycle, bursts of baroreceptor afferent activity encoding the strength and timing of each heartbeat are carried via the vagus and glossopharyngeal nerve afferents to the nucleus of the solitary tract. This is the principal route that communicates to the brain the dynamic state of the heart, enabling the representation of cardiovascular arousal within viscerosensory brain regions, and influence ascending neuromodulator systems implicated in emotional and motivational behaviour. Because arterial baroreceptors are activated by the arterial pulse pressure wave, their phasic discharge is maximal during and immediately after the cardiac systole, that is, when the blood is ejected from the heart, and minimal during cardiac diastole, that is, between heartbeats (Azevedo, 2017).
- **ECG Signal Quality**: Using the PTB-Diagnostic dataset available from PhysioNet, we extracted all the ECG signals from the healthy participants, that contained 15 recording leads/subject. We extracted all cardiac cycles, for each lead, and downsampled them from 600 to 200 datapoints. Note that we dropped the 8 first values that were NaNs. Then, we fitted a neural network model on 2/3 of the dataset (that contains 134392 cardiac cycles) to predict the lead. Model evaluation was done on the remaining 1/3. The model show good performances in predicting the correct recording lead (accuracy=0.91, precision=0.91). In this function, this model is fitted on each cardiac cycle of the provided ECG signal. It returns the probable recording lead (the most common predicted lead), the signal quality of each cardiac cycle (the probability of belonging to the probable recording lead) and the overall signal quality (the mean of signal quality). See creation `scripts <https://github.com/neuropsychology/NeuroKit.py/tree/master/utils/ecg_signal_quality_model_creation>`_.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Rhenan Bartels (https://github.com/rhenanbartels)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
- hrv: https://github.com/rhenanbartels/hrv
- RHRV: http://rhrv.r-forge.r-project.org/
References
------------
- Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381.
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
- Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32.
- Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585.
- Azevedo, R. T., Garfinkel, S. N., Critchley, H. D., & Tsakiris, M. (2017). Cardiac afferent activity modulates the expression of racial stereotypes. Nature communications, 8.
- Edwards, L., Ring, C., McIntyre, D., & Carroll, D. (2001). Modulation of the human nociceptive flexion reflex across the cardiac cycle. Psychophysiology, 38(4), 712-718.
- Gray, M. A., Rylander, K., Harrison, N. A., Wallin, B. G., & Critchley, H. D. (2009). Following one's heart: cardiac rhythms gate central initiation of sympathetic reflexes. Journal of Neuroscience, 29(6), 1817-1825.
"""
# Preprocessing
# =============
processed_ecg = ecg_preprocess(ecg,
sampling_rate=sampling_rate,
filter_type=filter_type,
filter_band=filter_band,
filter_frequency=filter_frequency,
segmenter=segmenter)
# Signal quality
# ===============
if quality_model is not None:
quality = ecg_signal_quality(cardiac_cycles=processed_ecg["ECG"]["Cardiac_Cycles"], sampling_rate=sampling_rate, rpeaks=processed_ecg["ECG"]["R_Peaks"], quality_model=quality_model)
processed_ecg["ECG"].update(quality)
processed_ecg["df"] = pd.concat([processed_ecg["df"], quality["ECG_Signal_Quality"]], axis=1)
# HRV
# =============
if hrv_features is not None:
hrv = ecg_hrv(rpeaks=processed_ecg["ECG"]["R_Peaks"], sampling_rate=sampling_rate, hrv_features=hrv_features)
try:
processed_ecg["df"] = pd.concat([processed_ecg["df"], hrv.pop("df")], axis=1)
except KeyError:
pass
processed_ecg["ECG"]["HRV"] = hrv
if age is not None and sex is not None and position is not None:
processed_ecg["ECG"]["HRV_Adjusted"] = ecg_hrv_assessment(hrv, age, sex, position)
# RSP
# =============
if rsp is not None:
rsp = rsp_process(rsp=rsp, sampling_rate=sampling_rate)
processed_ecg["RSP"] = rsp["RSP"]
processed_ecg["df"] = pd.concat([processed_ecg["df"], rsp["df"]], axis=1)
# RSA
# =============
rsa = ecg_rsa(processed_ecg["ECG"]["R_Peaks"], rsp["df"]["RSP_Filtered"], sampling_rate=sampling_rate)
processed_ecg["ECG"]["RSA"] = rsa
processed_ecg["df"] = pd.concat([processed_ecg["df"], rsa.pop("df")], axis=1)
return(processed_ecg) | 0.002934 |
def get_widget(self, index=None, path=None, tabs=None):
"""Get widget by index.
If no tabs and index specified the current active widget is returned.
"""
if (index and tabs) or (path and tabs):
return tabs.widget(index)
elif self.plugin:
return self.get_plugin_tabwidget(self.plugin).currentWidget()
else:
return self.plugins_tabs[0][0].currentWidget() | 0.004577 |
def load_data(self, filename, *args, **kwargs):
"""
load data from text file.
:param filename: name of file to read
:type filename: str
:returns: data read from file using :func:`numpy.genfromtxt`
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
# header keys
header_param = self.parameters.get('header') # default is None
# data keys
data_param = self.parameters['data'] # raises KeyError if no 'data'
dtype = data_param.get('dtype') # default is None
        # if not None convert to tuple and normal ASCII
        if dtype:
            _utf8_list_to_ascii_tuple(dtype)  # -> tuple of str
delimiter = data_param.get('delimiter') # default is None
skip_header = data_param.get('skip_header') # default is None
usecols = data_param.get('usecols') # default is None
names = data_param.get('names') # default is None
names = [str(_) for _ in names] if names else None # -> str
excludelist = data_param.get('excludelist') # default is None
deletechars = data_param.get('deletechars') # default is None
data_units = data_param.get('units', {}) # default is an empty dict
# either dtype or names must be specified
if not (dtype or names):
raise UnnamedDataError(filename)
data = {} # a dictionary for data
# open file for reading
with open(filename, 'r') as fid:
# read header
if header_param:
data.update(_read_header(fid, header_param))
fid.seek(0) # move cursor back to beginning
# data
data_data = np.genfromtxt(fid, dtype, delimiter=delimiter,
skip_header=skip_header, usecols=usecols,
names=names, excludelist=excludelist,
deletechars=deletechars)
# apply units
data.update(_apply_units(data_data, data_units, fid.name))
return data | 0.000949 |
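A minimal sketch of the kind of `parameters` block this reader consumes and the `numpy.genfromtxt` call it boils down to; the column names, units, and CSV content below are invented for illustration and are not part of the original spec:

import io
import numpy as np

csv_text = u"timestamp,power\n0,1.5\n1,2.0\n2,2.5\n"

# Roughly what the 'data' section of self.parameters might hold (keys taken from the code above).
data_param = {'delimiter': ',', 'skip_header': 1,
              'names': ['timestamp', 'power'], 'units': {'power': 'W'}}

arr = np.genfromtxt(io.StringIO(csv_text),
                    delimiter=data_param['delimiter'],
                    skip_header=data_param['skip_header'],
                    names=data_param['names'])
print(arr['power'])  # -> [1.5 2.  2.5]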
def get_cookie_jar(self):
"""Returns our cookie jar."""
cookie_file = self._get_cookie_file()
cookie_jar = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
cookie_jar.load()
else:
safe_mkdir_for(cookie_file)
# Save an empty cookie jar so we can change the file perms on it before writing data to it.
with self._lock:
cookie_jar.save()
os.chmod(cookie_file, 0o600)
return cookie_jar | 0.015487 |
def _check_parameters(parameters, symbols):
"""
Checks that the parameters given are not empty. Ones with prefix symbols
can be denoted by including the prefix in symbols
"""
for param in parameters:
if not param:
raise ValueError(EMPTY_PARAMETER)
elif (param[0] in symbols) and (not param[1:]):
print(param)
raise ValueError(EMPTY_KEYWORD_PARAMETER) | 0.00237 |
def get_number(s, cast=int):
"""
Try to get a number out of a string, and cast it.
"""
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d) | 0.005208 |
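A quick illustration of the digit-extraction behaviour above; every digit is kept and concatenated, so separators (including decimal points) are simply dropped:

print(get_number("v1.2.3"))            # -> 123
print(get_number("price: 42 USD"))     # -> 42
print(get_number("3.14", cast=float))  # -> 314.0 (the decimal point is stripped)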
def delete_attributes(self, item_name, attributes=None,
expected_values=None):
"""
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
return self.connection.delete_attributes(self, item_name, attributes,
expected_values) | 0.007585 |
def _build(self, inputs):
"""Connects the _ConvNDTranspose module into the graph.
If this is not the first time the module has been connected to the graph,
the input Tensor provided here must have the same final N dimensions, in
order for the existing variables to be the correct size for the
multiplication. The batch size may differ for each connection.
Args:
inputs: A Tensor of shape `data_format` and of type
`tf.float16`, `tf.bfloat16` or `tf.float32`.
Returns:
A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16`
or `tf.float32`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time and the inferred size of the input does not match previous
invocations.
base.IncompatibleShapeError: If the input tensor has the wrong number
of dimensions.
base.UnderspecifiedError: If the channel dimension of `inputs` isn't
defined.
base.IncompatibleShapeError: If `output_shape` is an iterable and is not
in the format `(out_height, out_width)`.
TypeError: If input Tensor dtype is not compatible with either
`tf.float16`, `tf.bfloat16` or `tf.float32`.
"""
_verify_inputs(inputs, self._channel_index, self._data_format)
self._input_shape = tuple(inputs.get_shape().as_list())
self._input_channels = self._input_shape[self._channel_index]
# First, figure out what the non-(N,C) dims will be.
if self._use_default_output_shape:
def _default_transpose_size_wrapper():
if self._data_format.startswith("NC"):
input_size = self._input_shape[2:]
stride = self.stride[2:]
else: # self._data_format == N*WC
input_size = self._input_shape[1:-1]
stride = self.stride[1:-1]
return _default_transpose_size(input_size,
stride,
kernel_shape=self._kernel_shape,
padding=self._padding)
self._output_shape = _default_transpose_size_wrapper
if len(self.output_shape) != self._n:
raise base.IncompatibleShapeError(
"Output shape must have rank {}, but instead was {}".format(
self._n, len(self.output_shape)))
# Now, construct the size of the output, including the N + C dims.
output_shape = self._infer_all_output_dims(inputs)
self._w = self._construct_w(inputs)
if self._n == 1:
# Add a dimension for the height.
if self._data_format == DATA_FORMAT_NWC:
h_dim = 1
two_dim_conv_data_format = DATA_FORMAT_NHWC
else: # self._data_format == DATA_FORMAT_NCW
h_dim = 2
two_dim_conv_data_format = DATA_FORMAT_NCHW
inputs = tf.expand_dims(inputs, h_dim)
two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]
outputs = tf.nn.conv2d_transpose(inputs,
self._w,
output_shape,
strides=two_dim_conv_stride,
padding=self._padding,
data_format=two_dim_conv_data_format)
# Remove the height dimension to return a 3D tensor.
outputs = tf.squeeze(outputs, [h_dim])
elif self._n == 2:
outputs = tf.nn.conv2d_transpose(inputs,
self._w,
output_shape,
strides=self._stride,
padding=self._padding,
data_format=self._data_format)
else:
outputs = tf.nn.conv3d_transpose(inputs,
self._w,
output_shape,
strides=self._stride,
padding=self._padding,
data_format=self._data_format)
if self._use_bias:
self._b, outputs = _apply_bias(
inputs, outputs, self._channel_index, self._data_format,
self._output_channels, self._initializers, self._partitioners,
self._regularizers)
outputs = self._recover_shape_information(inputs, outputs)
return outputs | 0.004286 |
def from_xso_item(cls, xso_item):
"""
Create a :class:`Item` with the :attr:`jid` set to the
:attr:`.xso.Item.jid` obtained from `xso_item`. Then update that
instance with `xso_item` using :meth:`update_from_xso_item` and return
it.
"""
item = cls(xso_item.jid)
item.update_from_xso_item(xso_item)
return item | 0.005249 |
def take_node_screenshot(self, element, screenshot_path):
        """Take a screenshot of a node
        Args:
            element (object): the proxy_element
            screenshot_path (str): the path where the screenshot will be saved
        """
        from PIL import Image
temp_path = os.path.join(tempdir, screenshot_path)
el_x = int(element.location['x'])
el_y = int(element.location['y'])
el_height = int(element.size['height'])
el_width = int(element.size['width'])
if el_height == 0 or el_width == 0:
self.debug_log("take_node_screenshot cannot be taken because element width or height equal zero") # noqa
return False
bounding_box = (
el_x,
el_y,
(el_x + el_width),
(el_y + el_height)
)
self._driver.save_screenshot(temp_path)
base_image = Image.open(temp_path)
cropped_image = base_image.crop(bounding_box)
base_image = base_image.resize(cropped_image.size)
base_image.paste(cropped_image, (0, 0))
        base_image.save(screenshot_path) | 0.002333 |
def find_loaders(scheme, protocols=None):
"""
Find all loaders that match the requested scheme and protocols.
:param scheme: Any valid scheme. Examples would be something like ``ini``
or ``ini+pastedeploy``.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement. If ``None`` then only generic loaders will
be returned.
:returns: A list containing zero or more :class:`plaster.ILoaderInfo`
objects.
"""
# build a list of all required entry points
matching_groups = ['plaster.loader_factory']
if protocols:
matching_groups += [
'plaster.{0}_loader_factory'.format(proto)
for proto in protocols
]
scheme = scheme.lower()
# if a distribution is specified then it overrides the default search
parts = scheme.split('+', 1)
if len(parts) == 2:
try:
distro = pkg_resources.get_distribution(parts[0])
except pkg_resources.DistributionNotFound:
pass
else:
ep = _find_ep_in_dist(distro, parts[1], matching_groups)
# if we got one or more loaders from a specific distribution
# then they override everything else so we'll just return them
if ep:
return [EntryPointLoaderInfo(ep, protocols)]
# find any distributions supporting the default loader protocol
possible_entry_points = [
ep
for ep in pkg_resources.iter_entry_points('plaster.loader_factory')
if scheme is None or scheme == ep.name.lower()
]
distros = {ep.dist for ep in possible_entry_points}
matched_entry_points = list(filter(None, [
_find_ep_in_dist(distro, scheme, matching_groups)
for distro in distros
]))
return [
EntryPointLoaderInfo(ep, protocols=protocols)
for ep in matched_entry_points
] | 0.00052 |
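A hedged usage sketch of the public entry point; it assumes the `plaster` package is installed, and the result is only non-empty when a concrete backend such as `plaster_pastedeploy` provides a matching loader:

import plaster

infos = plaster.find_loaders('ini+pastedeploy', protocols=['wsgi'])
for info in infos:
    print(info)  # each entry is an ILoaderInfo describing one matching loader factory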
async def get_blueprint_params(request, left: int, right: int) -> str:
"""
API Description: Multiply, left * right. This will show in the swagger page (localhost:8000/api/v1/).
"""
res = left * right
return "{left}*{right}={res}".format(left=left, right=right, res=res) | 0.00692 |
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
        # take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels] | 0.001613 |
def reset(self, seed):
"""
Reset this generator's seed generator and any clones.
"""
logger.debug(f'Resetting {self} (seed={seed})')
self.seed_generator.reset(seed)
for c in self.clones:
c.reset(seed) | 0.007663 |
def get_browser_controller(browser=None):
''' Return a browser controller.
Args:
browser (str or None) : browser name, or ``None`` (default: ``None``)
If passed the string ``'none'``, a dummy web browser controller
is returned
Otherwise, use the value to select an appropriate controller using
            the ``webbrowser`` standard library module. If the value is
``None`` then a system default is used.
.. note::
If the environment variable ``BOKEH_BROWSER`` is set, it will take
precedence.
Returns:
controller : a web browser controller
'''
browser = settings.browser(browser)
if browser is not None:
if browser == 'none':
controller = DummyWebBrowser()
else:
controller = webbrowser.get(browser)
else:
controller = webbrowser
return controller | 0.001087 |
def _machine_check_connectivity():
"""
This method calls to docker-machine on the command line and
makes sure that it is up and ready.
Potential improvements to be made:
- Support multiple machine names (run a `docker-machine ls` and then
see which machines are active. Use a priority list)
"""
with open(devnull, 'w') as devnull_f:
try:
status = subprocess.check_output(
['docker-machine', 'status', 'dev'],
stderr=devnull_f).strip()
if status == 'Stopped':
raise DatacatsError('Please start your docker-machine '
'VM with "docker-machine start dev"')
# XXX HACK: This exists because of
# http://github.com/datacats/datacats/issues/63,
# as a temporary fix.
if 'tls' in _docker_kwargs:
# It will print out messages to the user otherwise.
_docker_kwargs['tls'].assert_hostname = False
except subprocess.CalledProcessError:
raise DatacatsError('Please create a docker-machine with '
'"docker-machine start dev"') | 0.000826 |
def dateint_to_datetime(dateint):
"""Converts the given dateint to a datetime object, in local timezone.
Arguments
---------
dateint : int
        An integer object depicting a specific calendar day; e.g. 20161225.
Returns
-------
datetime.datetime
A timezone-unaware datetime object representing the start of the given
day (so at 0 hours, 0 minutes, etc...) in the local timezone.
"""
if len(str(dateint)) != 8:
raise ValueError(
'Dateints must have exactly 8 digits; the first four representing '
'the year, the next two the months, and the last two the days.')
year, month, day = decompose_dateint(dateint)
return datetime(year=year, month=month, day=day) | 0.001326 |
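`decompose_dateint` is referenced but not shown above; a minimal sketch of what it presumably does, followed by the worked example from the docstring (this assumes `from datetime import datetime` at module level, as the original module presumably has):

def decompose_dateint(dateint):
    # Split e.g. 20161225 into (2016, 12, 25) with integer arithmetic.
    year, rest = divmod(dateint, 10000)
    month, day = divmod(rest, 100)
    return year, month, day

print(decompose_dateint(20161225))    # -> (2016, 12, 25)
print(dateint_to_datetime(20161225))  # -> 2016-12-25 00:00:00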
def _load_package_config(reload_=False):
"""Loads the package configurations from the global `acorn.cfg` file.
"""
global _packages
from acorn.config import settings
packset = settings("acorn", reload_)
if packset.has_section("acorn.packages"):
for package, value in packset.items("acorn.packages"):
_packages[package] = value.strip() == "1" | 0.002597 |
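A standalone approximation of the same pattern using only the standard library `configparser`; the section contents are invented for illustration:

import configparser

cfg = configparser.ConfigParser()
cfg.read_string(u"[acorn.packages]\nnumpy = 1\nscipy = 0\n")

packages = {}
if cfg.has_section("acorn.packages"):
    for package, value in cfg.items("acorn.packages"):
        packages[package] = value.strip() == "1"
print(packages)  # -> {'numpy': True, 'scipy': False}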
async def lookup_entities(client, args):
"""Search for entities by phone number, email, or gaia_id."""
lookup_spec = _get_lookup_spec(args.entity_identifier)
request = hangups.hangouts_pb2.GetEntityByIdRequest(
request_header=client.get_request_header(),
batch_lookup_spec=[lookup_spec],
)
res = await client.get_entity_by_id(request)
# Print the list of entities in the response.
for entity_result in res.entity_result:
for entity in entity_result.entity:
print(entity) | 0.001869 |
def isNumber(self, value):
"""
Validate whether a value is a number or not
"""
try:
str(value)
float(value)
return True
except ValueError:
return False | 0.008333 |
def remove_child_objective_bank(self, objective_bank_id, child_id):
"""Removes a child from an objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
arg: child_id (osid.id.Id): the ``Id`` of the child
raise: NotFound - ``objective_bank_id`` not a parent of
``child_id``
raise: NullArgument - ``objective_bank_id`` or ``child_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalog(catalog_id=objective_bank_id, child_id=child_id)
return self._hierarchy_session.remove_child(id_=objective_bank_id, child_id=child_id) | 0.003865 |
def initinfo(self) -> Tuple[Union[float, int, bool], bool]:
"""The actual initial value of the given parameter.
Some |Parameter| subclasses define another value for class
attribute `INIT` than |None| to provide a default value.
Let's define a parameter test class and prepare a function for
initialising it and connecting the resulting instance to a
|SubParameters| object:
>>> from hydpy.core.parametertools import Parameter, SubParameters
>>> class Test(Parameter):
... NDIM = 0
... TYPE = float
... TIME = None
... INIT = 2.0
>>> class SubGroup(SubParameters):
... CLASSES = (Test,)
>>> def prepare():
... subpars = SubGroup(None)
... test = Test(subpars)
... test.__hydpy__connect_variable2subgroup__()
... return test
By default, making use of the `INIT` attribute is disabled:
>>> test = prepare()
>>> test
test(?)
Enable it through setting |Options.usedefaultvalues| to |True|:
>>> from hydpy import pub
>>> pub.options.usedefaultvalues = True
>>> test = prepare()
>>> test
test(2.0)
When no `INIT` attribute is defined, enabling
|Options.usedefaultvalues| has no effect, of course:
>>> del Test.INIT
>>> test = prepare()
>>> test
test(?)
For time-dependent parameter values, the `INIT` attribute is assumed
to be related to a |Parameterstep| of one day:
>>> test.parameterstep = '2d'
>>> test.simulationstep = '12h'
>>> Test.INIT = 2.0
>>> Test.TIME = True
>>> test = prepare()
>>> test
test(4.0)
>>> test.value
1.0
"""
init = self.INIT
if (init is not None) and hydpy.pub.options.usedefaultvalues:
with Parameter.parameterstep('1d'):
return self.apply_timefactor(init), True
return variabletools.TYPE2MISSINGVALUE[self.TYPE], False | 0.000946 |
def findAllSubstrings(string, substring):
""" Returns a list of all substring starting positions in string or an empty
list if substring is not present in string.
:param string: a template string
:param substring: a string, which is looked for in the ``string`` parameter.
:returns: a list of substring starting positions in the template string
"""
#TODO: solve with regex? what about '.':
#return [m.start() for m in re.finditer('(?='+substring+')', string)]
start = 0
positions = []
while True:
start = string.find(substring, start)
if start == -1:
break
positions.append(start)
#+1 instead of +len(substring) to also find overlapping matches
start += 1
return positions | 0.007752 |
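A short worked example; the `start += 1` step is what makes overlapping occurrences count:

print(findAllSubstrings('AAACAAA', 'AA'))  # -> [0, 1, 4, 5] (0/1 and 4/5 overlap)
print(findAllSubstrings('AAACAAA', 'G'))   # -> []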
def _fn_with_meta(f, meta: Optional[lmap.Map]):
"""Return a new function with the given meta. If the function f already
has a meta map, then merge the """
if not isinstance(meta, lmap.Map):
raise TypeError("meta must be a map")
if inspect.iscoroutinefunction(f):
@functools.wraps(f)
async def wrapped_f(*args, **kwargs):
return await f(*args, **kwargs)
else:
@functools.wraps(f) # type: ignore
def wrapped_f(*args, **kwargs):
return f(*args, **kwargs)
wrapped_f.meta = ( # type: ignore
f.meta.update(meta)
if hasattr(f, "meta") and isinstance(f.meta, lmap.Map)
else meta
)
wrapped_f.with_meta = partial(_fn_with_meta, wrapped_f) # type: ignore
return wrapped_f | 0.001261 |
def _Open(self, path_spec, mode='rb'):
"""Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
tsk_image_object = tsk_image.TSKFileSystemImage(file_object)
tsk_file_system = pytsk3.FS_Info(tsk_image_object)
except:
file_object.close()
raise
self._file_object = file_object
self._tsk_file_system = tsk_file_system | 0.007107 |
def __build_signature(self, saml_data, relay_state, saml_type, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
"""
Builds the Signature
:param saml_data: The SAML Data
:type saml_data: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param saml_type: The target URL the user should be redirected to
:type saml_type: string SAMLRequest | SAMLResponse
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
"""
assert saml_type in ['SAMLRequest', 'SAMLResponse']
# Load the key into the xmlsec context
key = self.__settings.get_sp_key()
if not key:
raise OneLogin_Saml2_Error(
"Trying to sign the %s but can't load the SP private key" % saml_type,
OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND
)
dsig_ctx = xmlsec.DSigCtx()
dsig_ctx.signKey = xmlsec.Key.loadMemory(key, xmlsec.KeyDataFormatPem, None)
msg = '%s=%s' % (saml_type, quote_plus(saml_data))
if relay_state is not None:
msg += '&RelayState=%s' % quote_plus(relay_state)
msg += '&SigAlg=%s' % quote_plus(sign_algorithm)
# Sign the metadata with our private key.
sign_algorithm_transform_map = {
OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.TransformDsaSha1,
OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.TransformRsaSha1,
OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.TransformRsaSha256,
OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.TransformRsaSha384,
OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.TransformRsaSha512
}
sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.TransformRsaSha1)
signature = dsig_ctx.signBinary(str(msg), sign_algorithm_transform)
return b64encode(signature) | 0.003024 |
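For reference, the exact string that ends up being signed for the HTTP-Redirect binding can be reproduced with the standard library alone; the SAMLRequest payload and URLs below are placeholders:

from urllib.parse import quote_plus

saml_data = 'fZFBa...'  # deflated+base64 AuthnRequest (placeholder)
relay_state = 'https://sp.example.com/return'
sig_alg = 'http://www.w3.org/2000/09/xmldsig#rsa-sha1'

msg = 'SAMLRequest=%s' % quote_plus(saml_data)
msg += '&RelayState=%s' % quote_plus(relay_state)
msg += '&SigAlg=%s' % quote_plus(sig_alg)
print(msg)  # this is the byte string handed to the RSA signing transform above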
def relpath(path, start):
"""Get relative path to start.
Note: Modeled after python2.6 :meth:`os.path.relpath`.
"""
path_items = path_list(path)
start_items = path_list(start)
# Find common parts of path.
common = []
for pth, stt in zip(path_items, start_items):
if pth != stt:
break
common.append(pth)
# Shared parts index in both lists.
common_ind = len(common)
parent_num = len(start_items) - common_ind
# Start with parent traversal and add relative parts.
rel_items = [PARENT] * parent_num + path_items[common_ind:]
return path_join(*rel_items) | 0.00157 |
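The helpers `path_list`, `path_join`, and `PARENT` are defined elsewhere; assuming they split/join on '/' and that `PARENT` is '..', the function behaves like the standard relpath, e.g.:

import posixpath

path = '/home/user/project/src/module.py'
start = '/home/user/docs'
print(posixpath.relpath(path, start))  # -> '../project/src/module.py'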
def create_long_form_weights(model_obj, wide_weights, rows_to_obs=None):
"""
Converts an array of weights with one element per observation (wide-format)
to an array of weights with one element per observation per available
alternative (long-format).
Parameters
----------
model_obj : an instance or sublcass of the MNDC class.
Should be the model object that corresponds to the model we are
constructing the bootstrap confidence intervals for.
wide_weights : 1D or 2D ndarray.
Should contain one element or one column per observation in
`model_obj.data`, depending on whether `wide_weights` is 1D or 2D
respectively. These elements should be the weights for optimizing the
model's objective function for estimation.
rows_to_obs : 2D scipy sparse array.
A mapping matrix of zeros and ones, were `rows_to_obs[i, j]` is one if
row `i` of the long-format data belongs to observation `j` and zero
otherwise.
Returns
-------
long_weights : 1D or 2D ndarray.
Should contain one element or one column per observation in
`model_obj.data`, depending on whether `wide_weights` is 1D or 2D
respectively. These elements should be the weights from `wide_weights`,
simply mapping each observation's weight to the corresponding row in
the long-format data.
"""
# Ensure argument validity
check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs)
# Get a rows_to_obs mapping matrix.
if rows_to_obs is None:
rows_to_obs = model_obj.get_mappings_for_fit()['rows_to_obs']
    # Create a 2D version of the weights
wide_weights_2d =\
wide_weights if wide_weights.ndim == 2 else wide_weights[:, None]
long_weights = rows_to_obs.dot(wide_weights_2d)
if wide_weights.ndim == 1:
long_weights = long_weights.sum(axis=1)
return long_weights | 0.000517 |
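A small standalone illustration of the row-to-observation expansion the function performs; the mapping matrix and weights are made up for the example:

import numpy as np
from scipy.sparse import csr_matrix

# Rows 0-1 belong to observation 0, rows 2-3 to observation 1, row 4 to observation 2.
rows_to_obs = csr_matrix([[1, 0, 0],
                          [1, 0, 0],
                          [0, 1, 0],
                          [0, 1, 0],
                          [0, 0, 1]])
wide_weights = np.array([0.5, 2.0, 1.0])  # one weight per observation

long_weights = rows_to_obs.dot(wide_weights[:, None]).sum(axis=1)
print(long_weights)  # -> [0.5 0.5 2.  2.  1. ]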
def __get_axes(ax=None, fig=None):
'''Get or construct the target axes object for a new plot.
Parameters
----------
ax : matplotlib.pyplot.axes, optional
If provided, return this axes object directly.
fig : matplotlib.figure.Figure, optional
The figure to query for axes.
By default, uses the current figure `plt.gcf()`.
Returns
-------
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
new_axes : bool
If `True`, the axis object was newly constructed.
If `False`, the axis object already existed.
'''
new_axes = False
if ax is not None:
return ax, new_axes
if fig is None:
import matplotlib.pyplot as plt
fig = plt.gcf()
if not fig.get_axes():
new_axes = True
return fig.gca(), new_axes | 0.001081 |
def to_dict(self,include_node_id=False,no_attributes=False,track_namespaces=False):
"""
This function is currently geared very much towards writing
STIX/CybOX objects to a dictionary. That should not be the case -- the function
needs to be generic just as the from_dict function.
TODO: make function generic.
"""
flat_result = []
def make_ns_slug(name_counter,slug='n'):
while "%s%s" % (slug,name_counter['counter']) in namespace_mapping.values():
name_counter['counter'] = name_counter['counter']+1
return "%s%s" % (slug,name_counter['counter'])
name_counter = {'counter':0}
if track_namespaces:
fact_thrus = self.fact_thru.all().prefetch_related(
'fact__fact_term',
'fact__fact_values',
'fact__fact_values__fact_data_type',
'fact__fact_values__fact_data_type__namespace',
'fact__value_iobject_id',
'fact__value_iobject_id__namespace',
'namespace_map__namespaces_thru__namespace',
'node_id')
else:
fact_thrus = self.fact_thru.all().prefetch_related(
'fact__fact_term',
'fact__fact_values',
'fact__fact_values__fact_data_type',
'fact__fact_values__fact_data_type__namespace',
'fact__value_iobject_id',
'fact__value_iobject_id__namespace',
'node_id')
export_ns_dict = {}
namespace_mapping= {"%s-%s" % (self.iobject_type.namespace.uri,self.iobject_type_revision): 'n0'}
#fact_thrus = self.fact_thru.all()
for fact_thru in fact_thrus:
#print fact_thru.node_id
#print fact_thru.fact.fact_term
#for positional_namespace in fact_thru.namespace_map.namespaces_thru.all():
# print positional_namespace
value_list = []
first = True
fact_datatype_name = None
fact_datatype_ns = None
fact_dict = {'node_id': fact_thru.node_id.name,
'term': fact_thru.fact.fact_term.term,
'attribute' : fact_thru.fact.fact_term.attribute,
'@@namespace_map' : fact_thru.namespace_map,
}
for fact_value in fact_thru.fact.fact_values.all():
if first:
first=False
fact_datatype_name = fact_value.fact_data_type.name
fact_datatype_ns = fact_value.fact_data_type.namespace.uri
if (fact_datatype_name == DINGOS_DEFAULT_FACT_DATATYPE and
fact_datatype_ns == DINGOS_NAMESPACE_URI) or fact_thru.fact.value_iobject_id:
pass
else:
if not fact_datatype_ns in namespace_mapping:
namespace_slug = make_ns_slug(name_counter)
namespace_mapping[fact_datatype_ns] = namespace_slug
else:
namespace_slug= namespace_mapping[fact_datatype_ns]
fact_dict['@@type'] = '%s:%s' % (namespace_slug,fact_datatype_name)
value_list.append(fact_value.value)
fact_dict['value_list'] = value_list
if fact_thru.fact.value_iobject_id:
value_iobject_id_ns = fact_thru.fact.value_iobject_id.namespace.uri
if not value_iobject_id_ns in namespace_mapping:
namespace_slug = make_ns_slug(name_counter)
namespace_mapping[value_iobject_id_ns] = namespace_slug
else:
namespace_slug= namespace_mapping[value_iobject_id_ns]
value_iobject_id =fact_thru.fact.value_iobject_id.uid
if fact_dict['attribute']:
# Here we treat the case that the reference is part of an attribute such as
# 'phase_id'
fact_dict['value_list'] = ["%s:%s" % (namespace_slug,value_iobject_id)]
else:
# Otherwise, we sneak in an idref attribute. Because the code that
# generates the dictionary simply dumps all untreated key-value paris
# into the generated dictionary, this works... but is a bit of a hack, really.
fact_dict['@idref'] = "%s:%s" % (namespace_slug,value_iobject_id)
flat_result.append(fact_dict)
result = DingoObjDict()
result.from_flat_repr(flat_result,
include_node_id=include_node_id,
no_attributes=no_attributes,
track_namespaces=track_namespaces,
namespace_mapping=namespace_mapping
)
if not no_attributes:
if not track_namespaces:
result['@@iobject_type'] = self.iobject_type.name
result['@@iobject_type_ns'] = self.iobject_type.namespace.uri
return result
else:
result['@ns'] = namespace_mapping["%s-%s" % (self.iobject_type.namespace.uri,self.iobject_type_revision)]
#result['@@iobject_type'] = self.iobject_type.name
return {'namespaces': dict(map(lambda x : (x[1],x[0]), namespace_mapping.items())),
'objects' : [result]
}
else:
return result | 0.011117 |
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json | 0.009202 |
def _get_attributes(schema, location):
"""Return the schema's children, filtered by location."""
schema = DottedNameResolver(__name__).maybe_resolve(schema)
def _filter(attr):
if not hasattr(attr, "location"):
valid_location = 'body' in location
else:
valid_location = attr.location in to_list(location)
return valid_location
return list(filter(_filter, schema().children)) | 0.004211 |
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
def _through_option(func, *args, **kwds):
"""new function"""
try:
func(*args, **kwds)
except optparse.BadOptionError as err:
largs.append(err.opt_str)
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
# self._process_long_opt(rargs, values)
_through_option(self._process_long_opt, rargs, values) # modified
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
_through_option(self._process_short_opts, rargs, values) # modified
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return | 0.002398 |
def copy(self):
"""
Returns a "T" (tee) copy of the given stream, allowing the calling
stream to continue being used.
"""
a, b = it.tee(self._data) # 2 generators, not thread-safe
self._data = a
return Stream(b) | 0.008368 |
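A standalone sketch of the `itertools.tee` idiom used above; consuming the returned copy does not exhaust the retained stream, because tee buffers the shared source:

import itertools as it

data = iter(range(5))
a, b = it.tee(data)  # after this, `data` itself should no longer be used directly
print(list(b))       # -> [0, 1, 2, 3, 4] (the copy handed out)
print(list(a))       # -> [0, 1, 2, 3, 4] (the stream keeps its own view)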
def get_fobj(fname, mode='w+'):
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it.
"""
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = False
return fobj, close | 0.000856 |
def _updateParamsFrom(self, otherObj:"PropDeclrCollector", updater, exclude:set, prefix:str) -> None:
"""
Update all parameters which are defined on self from otherObj
    :param otherObj: other object whose Param instances should be updated
:param updater: updater function(self, myParameter, onOtherParameterName, otherParameter)
:param exclude: iterable of parameter on otherObj object which should be excluded
    :param prefix: prefix which should be added to the name of parameters of this object before matching
        the parameter name on the parent
"""
excluded = set()
if exclude is not None:
exclude = set(exclude)
for myP in self._params:
pPName = prefix + myP._scopes[self][1]
try:
otherP = getattr(otherObj, pPName)
if not isinstance(otherP, Param):
continue
except AttributeError:
continue
if exclude and otherP in exclude:
excluded.add(otherP)
continue
updater(self, myP, otherP)
if exclude is not None:
# assert that what should be excluded really exists
assert excluded == exclude | 0.007819 |