def get_schema(self, filename):
"""
Guess schema using messytables
"""
table_set = self.read_file(filename)
# Bail out if the file could not be read
if table_set is None:
return []
# Get the first table as rowset
row_set = table_set.tables[0]
offset, headers = headers_guess(row_set.sample)
row_set.register_processor(headers_processor(headers))
row_set.register_processor(offset_processor(offset + 1))
types = type_guess(row_set.sample, strict=True)
# Get a sample as well..
sample = next(row_set.sample)
clean = lambda v: str(v) if not isinstance(v, str) else v
schema = []
for i, h in enumerate(headers):
schema.append([h,
str(types[i]),
clean(sample[i].value)])
return schema
def throttle(self, wait):
"""
Returns a function, that, when invoked, will only be triggered
at most once during a given window of time.
"""
ns = self.Namespace()
ns.timeout = None
ns.throttling = None
ns.more = None
ns.result = None
def done():
ns.more = ns.throttling = False
whenDone = _.debounce(done, wait)
wait = (float(wait) / float(1000))
def throttled(*args, **kwargs):
def later():
ns.timeout = None
if ns.more:
self.obj(*args, **kwargs)
whenDone()
if not ns.timeout:
ns.timeout = Timer(wait, later)
ns.timeout.start()
if ns.throttling:
ns.more = True
else:
ns.throttling = True
ns.result = self.obj(*args, **kwargs)
whenDone()
return ns.result
return self._wrap(throttled)
def sample(self, bqm, **kwargs):
"""Sample from the specified binary quadratic model.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
**kwargs:
Optional keyword arguments for the sampling method, specified per solver.
Returns:
:class:`dimod.SampleSet`
Examples:
This example submits a simple Ising problem of just two variables on a
D-Wave system selected by the user's default
:std:doc:`D-Wave Cloud Client configuration file <cloud-client:intro>`.
Because the problem fits in a single :term:`Chimera` unit cell, it is tiled
across the solver's entire Chimera graph, resulting in multiple samples
(the exact number depends on the working Chimera graph of the D-Wave system).
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import EmbeddingComposite, TilingComposite
...
>>> sampler = EmbeddingComposite(TilingComposite(DWaveSampler(), 1, 1, 4))
>>> response = sampler.sample_ising({},{('a', 'b'): 1})
>>> len(response) # doctest: +SKIP
246
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
# apply the embeddings to the given problem to tile it across the child sampler
embedded_bqm = dimod.BinaryQuadraticModel.empty(bqm.vartype)
__, __, target_adjacency = self.child.structure
for embedding in self.embeddings:
embedded_bqm.update(dwave.embedding.embed_bqm(bqm, embedding, target_adjacency))
# solve the problem on the child system
tiled_response = self.child.sample(embedded_bqm, **kwargs)
responses = []
for embedding in self.embeddings:
embedding = {v: chain for v, chain in embedding.items() if v in bqm.variables}
responses.append(dwave.embedding.unembed_sampleset(tiled_response, embedding, bqm))
return dimod.concatenate(responses)
def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y
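# A minimal standalone sketch of the weighted mixture likelihood described in the
# docstring above; the points, sigmas and weights below are illustrative values,
# not taken from any real model.
import numpy as np

def gmm_pdf(x, points, sigmas, weights):
    """Evaluate sum_i w_i * N(x; mu_i, sigma_i) / sum_i w_i."""
    z = (x - points) / sigmas
    norm = np.exp(-0.5 * z * z) / (np.sqrt(2.0 * np.pi) * sigmas)
    return float(np.dot(weights, norm) / weights.sum())

# Example: two equally weighted Gaussians evaluated at x = 0.5
print(gmm_pdf(0.5, np.array([0.0, 1.0]), np.array([0.5, 0.5]), np.array([1.0, 1.0])))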
def get_idx(self, node):
"""
Finds the index of the node in the sorted list.
"""
group = self.find_node_group_membership(node)
return self.nodes[group].index(node)
def duplicate(self, name=None, location=None):
"""
Duplicate a project
This is the "save as" feature from 1.X. It is implemented on top of the
export / import features: it generates a .gns3p file and re-imports it.
This is a little slower, but there is only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
:param location: Parent directory of the new project
"""
# If the project was not opened, open it temporarily
previous_status = self._status
if self._status == "closed":
yield from self.open()
self.dump()
try:
with tempfile.TemporaryDirectory() as tmpdir:
zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
with open(os.path.join(tmpdir, "project.gns3p"), "wb") as f:
for data in zipstream:
f.write(data)
with open(os.path.join(tmpdir, "project.gns3p"), "rb") as f:
project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
except (OSError, UnicodeEncodeError) as e:
raise aiohttp.web.HTTPConflict(text="Can not duplicate project: {}".format(str(e)))
if previous_status == "closed":
yield from self.close()
return project
def _maybe_normalize(self, var):
"""normalize variant if requested, and ignore HGVSUnsupportedOperationError
This is better than checking whether the variant is intronic because
future UTAs will support LRG, which will enable checking intronic variants.
"""
if self.normalize:
try:
return self._norm.normalize(var)
except HGVSUnsupportedOperationError as e:
_logger.warning(str(e) + "; returning unnormalized variant")
# fall through to return unnormalized variant
return var
def get_name2value_dict(self):
"""returns a dictionary, that maps between `enum` name( key ) and
`enum` value( value )"""
x = {}
for val, num in self._values:
x[val] = num
return x
def scan(self, string):
""" Like findall, but also returning matching start and end string locations
"""
return list(self._scanner_to_matches(self.pattern.scanner(string), self.run))
def show_run(**kwargs):
'''
Shortcut to run `show running-config` on the NX-OS device.
.. code-block:: bash
salt '*' nxos.cmd show_run
'''
command = 'show running-config'
info = ''
info = show(command, **kwargs)
if isinstance(info, list):
info = info[0]
return info
def fetch_upstream(self):
""" git fetch <upstream> """
set_state(WORKFLOW_STATES.FETCHING_UPSTREAM)
cmd = ["git", "fetch", self.upstream]
self.run_cmd(cmd)
set_state(WORKFLOW_STATES.FETCHED_UPSTREAM)
def transferReporter(self, xferId, message):
''' the callback method used by the Aspera sdk during transfer
to notify progress, error or successful completion
'''
if self.is_stopped():
return True
_asp_message = AsperaMessage(message)
if not _asp_message.is_msg_type(
[enumAsperaMsgType.INIT,
enumAsperaMsgType.DONE,
enumAsperaMsgType.ERROR,
enumAsperaMsgType.FILEERROR,
enumAsperaMsgType.STATS]):
return
_session_id = _asp_message.get_session_id()
_msg = self.debug_id(xferId, _session_id) + " : " + _asp_message._msg_type
logger.info(_msg)
with self._session_lock:
if _asp_message.is_msg_type([enumAsperaMsgType.INIT]):
assert(_session_id not in self._sessions)
_session = AsperaSession(_session_id)
self._sessions[_session_id] = _session
self.notify_init()
else:
_session = self._sessions[_session_id]
if _asp_message.is_msg_type([enumAsperaMsgType.DONE]):
if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()):
self.notify_progress()
_session.set_success()
self.notify_done()
elif _asp_message.is_msg_type([enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR]):
_session.set_error(_asp_message.get_error_descr())
self.notify_done(error=True)
elif _asp_message.is_msg_type([enumAsperaMsgType.STATS]):
if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()):
self.notify_progress()
def find(self):
"""Call the find function"""
options = self.find_options.get_options()
if options is None:
return
self.stop_and_reset_thread(ignore_results=True)
self.search_thread = SearchThread(self)
self.search_thread.sig_finished.connect(self.search_complete)
self.search_thread.sig_current_file.connect(
lambda x: self.status_bar.set_label_path(x, folder=False)
)
self.search_thread.sig_current_folder.connect(
lambda x: self.status_bar.set_label_path(x, folder=True)
)
self.search_thread.sig_file_match.connect(
self.result_browser.append_result
)
self.search_thread.sig_out_print.connect(
lambda x: sys.stdout.write(str(x) + "\n")
)
self.status_bar.reset()
self.result_browser.clear_title(
self.find_options.search_text.currentText())
self.search_thread.initialize(*options)
self.search_thread.start()
self.find_options.ok_button.setEnabled(False)
self.find_options.stop_button.setEnabled(True)
self.status_bar.show()
def equal(self, cwd):
""" Returns True if left and right are equal
"""
cmd = ["diff"]
cmd.append("-q")
cmd.append(self.left.get_name())
cmd.append(self.right.get_name())
try:
Process(cmd).run(cwd=cwd, suppress_output=True)
except SubprocessError as e:
if e.get_returncode() == 1:
return False
else:
raise e
return True
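# A minimal sketch of the same idea using only the standard library, for readers
# without the Process/SubprocessError helpers used above (an assumption of this
# sketch): `diff -q` exits with 0 when the files match and 1 when they differ.
import subprocess

def files_equal(left, right, cwd=None):
    result = subprocess.run(["diff", "-q", left, right], cwd=cwd,
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if result.returncode == 1:
        return False
    if result.returncode != 0:
        raise RuntimeError("diff failed with exit code %d" % result.returncode)
    return True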
def intersect(self,range2):
"""Return the chunk they overlap as a range.
options is passed to result from this object
:param range2:
:type range2: GenomicRange
:return: Range with the intersecting segement, or None if not overlapping
:rtype: GenomicRange
"""
if not self.overlaps(range2): return None
return type(self)(self.chr,max(self.start,range2.start)+self._start_offset,min(self.end,range2.end),self.payload,self.dir)
def convert_jsonld_cdr(doc):
"""
converts json ld output of etk to cdr object
:param doc: the input Knowledge graph in json ld format
:return: a cdr object with embedded knowledge graph and doc_id.
# TODO DIG UI needs a @timestamp_crawl(?) add this too
"""
new_docs = list()
new_doc = dict()
kg = dict()
for key in list(doc):
if key == '@id':
new_doc['doc_id'] = doc['@id']
elif key == '@context':
new_doc[key] = doc[key]
elif key == '@type':
kg['type'] = list()
types = doc['@type']
if not isinstance(types, list):
types = [types]
for type in types:
kg['type'].append({'value': type, 'key': create_key_from_value(type, 'type')})
else:
kg[key] = list()
objs = doc[key]
if not isinstance(objs, list):
objs = [objs]
for obj in objs:
if '@id' in obj and '@context' in obj:
new_docs.extend(convert_jsonld_cdr(obj))
if '@id' in obj:
kg[key].append({'value': obj['@id'], 'key': obj['@id']})
elif '@value' in obj:
val = obj['@value']
k_val = create_key_from_value(val, key)
kg[key].append({'value': val, 'key': k_val})
new_doc['knowledge_graph'] = kg
new_docs.append(new_doc)
return new_docs
def dictionary_validator(key_type, value_type):
"""Validator for ``attrs`` that performs deep type checking of dictionaries."""
def _validate_dictionary(instance, attribute, value):
# pylint: disable=unused-argument
"""Validate that a dictionary is structured as expected.
:raises TypeError: if ``value`` is not a dictionary
:raises TypeError: if ``value`` keys are not all of ``key_type`` type
:raises TypeError: if ``value`` values are not all of ``value_type`` type
"""
if not isinstance(value, dict):
raise TypeError('"{}" must be a dictionary'.format(attribute.name))
for key, data in value.items():
if not isinstance(key, key_type):
raise TypeError(
'"{name}" dictionary keys must be of type "{type}"'.format(name=attribute.name, type=key_type)
)
if not isinstance(data, value_type):
raise TypeError(
'"{name}" dictionary values must be of type "{type}"'.format(name=attribute.name, type=value_type)
)
return _validate_dictionary
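# Illustrative use of the validator factory above with the ``attrs`` package; the
# ``Config`` class and its field names are hypothetical.
import attr

@attr.s
class Config(object):
    limits = attr.ib(validator=dictionary_validator(str, int))

Config(limits={"retries": 3, "timeout": 30})    # passes
# Config(limits={"retries": "3"})               # would raise TypeError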
def remove_study(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove a study
Given a study_id, branch and optionally an
author, remove a study on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
study_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_study_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, study_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Study #%s via OpenTree API" % study_id
return self._remove_document(gh_user, study_id, parent_sha, author, commit_msg)
def edge_style(self, head, tail, **kwargs):
'''
Modifies an edge style to the dot representation.
'''
if tail not in self.nodes:
raise GraphError("invalid node %s" % (tail,))
try:
if tail not in self.edges[head]:
self.edges[head][tail]= {}
self.edges[head][tail] = kwargs
except KeyError:
raise GraphError("invalid edge %s -> %s " % (head, tail) )
def request_signature(self):
"""
The signature passed in the request.
"""
signature = self.query_parameters.get(_x_amz_signature)
if signature is not None:
signature = signature[0]
else:
signature = self.authorization_header_parameters.get(_signature)
if signature is None:
raise AttributeError("Signature was not passed in the request")
return signature
def calc_copulas(self,
output_file,
model_names=("start-time", "translation-x", "translation-y"),
label_columns=("Start_Time_Error", "Translation_Error_X", "Translation_Error_Y")):
"""
Calculate a copula multivariate normal distribution from the training data for each group of ensemble members.
Distributions are written to a pickle file for later use.
Args:
output_file: Pickle file
model_names: Names of the tracking models
label_columns: Names of the data columns used for labeling
Returns:
"""
if len(self.data['train']) == 0:
self.load_data()
groups = self.data["train"]["member"][self.group_col].unique()
copulas = {}
label_columns = list(label_columns)
for group in groups:
print(group)
group_data = self.data["train"]["total_group"].loc[
self.data["train"]["total_group"][self.group_col] == group]
group_data = group_data.dropna()
group_data.reset_index(drop=True, inplace=True)
copulas[group] = {}
copulas[group]["mean"] = group_data[label_columns].mean(axis=0).values
copulas[group]["cov"] = np.cov(group_data[label_columns].values.T)
copulas[group]["model_names"] = list(model_names)
del group_data
pickle.dump(copulas, open(output_file, "wb"), pickle.HIGHEST_PROTOCOL)
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight loc figure """
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
l1_min = min(l1)
l1_max = max(l1)
m1 = [data['m1'][i] for i in inds]
m1_min = min(m1)
m1_max = max(m1)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = loc.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return loc
def _compute_all_deletions(self):
"""Returns all minimal edge covers of the set of evil edges.
"""
minimum_evil = []
for disabled_qubits in map(set, product(*self._evil)):
newmin = []
for s in minimum_evil:
if s < disabled_qubits:
break
elif disabled_qubits < s:
continue
newmin.append(s)
else:
minimum_evil = newmin + [disabled_qubits]
return minimum_evil
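# Standalone sketch of the same minimality bookkeeping on plain data (illustrative;
# it assumes each "evil" entry is a pair of qubit labels): choose one endpoint from
# every bad edge and keep only the inclusion-minimal choices.
from itertools import product

def minimal_covers(evil):
    minimum = []
    for choice in map(set, product(*evil)):
        newmin = [s for s in minimum if not choice < s]
        if any(s < choice for s in newmin):
            continue
        minimum = newmin + [choice]
    return minimum

print(minimal_covers([(1, 2), (2, 3)]))   # -> [{1, 3}, {2}]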
def expand(self, repex_vars, fields):
r"""Receive a dict of variables and a dict of fields
and iterates through them to expand a variable in an field, then
returns the fields dict with its variables expanded.
This will fail if not all variables expand (due to not providing
all necessary ones).
fields:
type: VERSION
path: resources
excluded:
- excluded_file.file
base_directory: '{{ .base_dir }}'
match: '"version": "\d+\.\d+(\.\d+)?(-\w\d+)?'
replace: \d+\.\d+(\.\d+)?(-\w\d+)?
with: "{{ .version }}"
must_include:
- {{ .my_var }}/{{ .another_var }}
- {{ .my_other_var }}
- version
validator:
type: per_file
path: {{ .my_validator_path }}
function: validate
variables:
{
'version': 3,
'base_dir': .
...
}
:param dict vars: dict of variables
:param dict fields: dict of fields as shown above.
"""
logger.debug('Expanding variables...')
unexpanded_instances = set()
# Expand variables in variables
# TODO: This should be done in the global scope.
# _VariableHandler is called per path, which makes this redundant
# as variables are declared globally per config.
for k, v in repex_vars.items():
repex_vars[k] = self._expand_var(v, repex_vars)
instances = self._get_instances(repex_vars[k])
unexpanded_instances.update(instances)
# TODO: Consolidate variable expansion code into single logic
# Expand variables in path objects
for key in fields.keys():
field = fields[key]
if isinstance(field, str):
fields[key] = self._expand_var(field, repex_vars)
instances = self._get_instances(fields[key])
unexpanded_instances.update(instances)
elif isinstance(field, dict):
for k, v in field.items():
fields[key][k] = self._expand_var(v, repex_vars)
instances = self._get_instances(fields[key][k])
unexpanded_instances.update(instances)
elif isinstance(field, list):
for index, item in enumerate(field):
fields[key][index] = self._expand_var(item, repex_vars)
instances = self._get_instances(fields[key][index])
unexpanded_instances.update(instances)
if unexpanded_instances:
raise RepexError(
'Variables failed to expand: {0}\n'
'Please make sure to provide all necessary variables '.format(
list(unexpanded_instances)))
return fields
def up(name, debug=False):
'''
Create servers and containers as required to meet the configuration
specified in _name_.
Args:
* name: The name of the yaml config file (you can omit the .yml extension for convenience)
Example:
fab ensemble.up:wordpress
'''
if debug:
env.ensemble_debug = True
filenames_to_try = [
name,
'%s.yml' % name,
'%s.yaml' % name,
]
for filename in filenames_to_try:
if os.path.exists(filename):
with open(filename, 'r') as f:
config = yaml.load(f)
break
else:
abort('Ensemble manifest not found: %s' % name)
uncache()
try:
do_up(config)
except exceptions.ConfigException as e:
abort('Config error: ' + str(e))
def WriteClientSnapshotHistory(self, clients, cursor=None):
"""Writes the full history for a particular client."""
client_id = clients[0].client_id
latest_timestamp = max(client.timestamp for client in clients)
query = ""
params = {
"client_id": db_utils.ClientIDToInt(client_id),
"latest_timestamp": mysql_utils.RDFDatetimeToTimestamp(latest_timestamp)
}
for idx, client in enumerate(clients):
startup_info = client.startup_info
client.startup_info = None
query += """
INSERT INTO client_snapshot_history (client_id, timestamp,
client_snapshot)
VALUES (%(client_id)s, FROM_UNIXTIME(%(timestamp_{idx})s),
%(client_snapshot_{idx})s);
INSERT INTO client_startup_history (client_id, timestamp,
startup_info)
VALUES (%(client_id)s, FROM_UNIXTIME(%(timestamp_{idx})s),
%(startup_info_{idx})s);
""".format(idx=idx)
params.update({
"timestamp_{idx}".format(idx=idx):
mysql_utils.RDFDatetimeToTimestamp(client.timestamp),
"client_snapshot_{idx}".format(idx=idx):
client.SerializeToString(),
"startup_info_{idx}".format(idx=idx):
startup_info.SerializeToString(),
})
client.startup_info = startup_info
query += """
UPDATE clients
SET last_snapshot_timestamp = FROM_UNIXTIME(%(latest_timestamp)s)
WHERE client_id = %(client_id)s
AND (last_snapshot_timestamp IS NULL OR
last_snapshot_timestamp < FROM_UNIXTIME(%(latest_timestamp)s));
UPDATE clients
SET last_startup_timestamp = FROM_UNIXTIME(%(latest_timestamp)s)
WHERE client_id = %(client_id)s
AND (last_startup_timestamp IS NULL OR
last_startup_timestamp < FROM_UNIXTIME(%(latest_timestamp)s));
"""
try:
cursor.execute(query, params)
except MySQLdb.IntegrityError as error:
raise db.UnknownClientError(client_id, cause=error)
def run_migrations_offline():
"""
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = context.config.get_main_option("sqlalchemy.url")
context.configure(url=url, compare_server_default=True)
with context.begin_transaction():
context.run_migrations()
def add_args(parser, positional=False):
"""
Extends a commandline argument parser with arguments for specifying
read sources.
"""
group = parser.add_argument_group("read loading")
group.add_argument("reads" if positional else "--reads",
nargs="+", default=[],
help="Paths to bam files. Any number of paths may be specified.")
group.add_argument(
"--read-source-name",
nargs="+",
help="Names for each read source. The number of names specified "
"must match the number of bam files. If not specified, filenames are "
"used for names.")
# Add filters
group = parser.add_argument_group(
"read filtering",
"A number of read filters are available. See the pysam "
"documentation (http://pysam.readthedocs.org/en/latest/api.html) "
"for details on what these fields mean. When multiple filter "
"options are specified, reads must match *all* filters.")
for (name, (kind, message, function)) in READ_FILTERS.items():
extra = {}
if kind is bool:
extra["action"] = "store_true"
extra["default"] = None
elif kind is int:
extra["type"] = int
extra["metavar"] = "N"
elif kind is str:
extra["metavar"] = "STRING"
group.add_argument("--" + name.replace("_", "-"),
help=message,
**extra)
def to_json(self):
"""
Convert to a JSON string.
"""
obj = {
"vertices": [
{
"id": vertex.id,
"annotation": vertex.annotation,
}
for vertex in self.vertices
],
"edges": [
{
"id": edge.id,
"annotation": edge.annotation,
"head": edge.head,
"tail": edge.tail,
}
for edge in self._edges
],
}
# Ensure that we always return unicode output on Python 2.
return six.text_type(json.dumps(obj, ensure_ascii=False))
def show_clock_output_clock_time_current_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_clock = ET.Element("show_clock")
config = show_clock
output = ET.SubElement(show_clock, "output")
clock_time = ET.SubElement(output, "clock-time")
current_time = ET.SubElement(clock_time, "current-time")
current_time.text = kwargs.pop('current_time')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def read_data_event(self, whence, complete=False, can_flush=False):
"""Creates a transition to a co-routine for retrieving data as bytes.
Args:
whence (Coroutine): The co-routine to return to after the data is satisfied.
complete (Optional[bool]): True if STREAM_END should be emitted if no bytes are read or
available; False if INCOMPLETE should be emitted in that case.
can_flush (Optional[bool]): True if NEXT may be requested after INCOMPLETE is emitted as a result of this
data request.
"""
return Transition(None, _read_data_handler(whence, self, complete, can_flush))
def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
"""
Move to the indicated HDU by name
In general, it is not necessary to use this method explicitly.
returns the one-offset extension number
"""
extname = mks(extname)
hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
return hdu
def _convert_unit(unit):
"""Convert different names into SI units.
Parameters
----------
unit : str
unit to convert to SI
Returns
-------
str
unit in SI format.
Notes
-----
SI unit such as mV (milliVolt, mVolt), μV (microVolt, muV).
"""
if unit is None:
return ''
prefix = None
suffix = None
if unit[:5].lower() == 'milli':
prefix = 'm'
unit = unit[5:]
elif unit[:5].lower() == 'micro':
prefix = mu
unit = unit[5:]
elif unit[:2].lower() == 'mu':
prefix = mu
unit = unit[2:]
if unit[-4:].lower() == 'volt':
suffix = 'V'
unit = unit[:-4]
if prefix is None and suffix is None:
unit = unit
elif prefix is None and suffix is not None:
unit = unit + suffix
elif prefix is not None and suffix is None:
unit = prefix + unit
else:
unit = prefix + suffix
return unit
def _cleanup_after_optimize_aux(filename, new_filename, old_format,
new_format):
"""
Replace old file with better one or discard new wasteful file.
"""
bytes_in = 0
bytes_out = 0
final_filename = filename
try:
bytes_in = os.stat(filename).st_size
bytes_out = os.stat(new_filename).st_size
if (bytes_out > 0) and ((bytes_out < bytes_in) or Settings.bigger):
if old_format != new_format:
final_filename = replace_ext(filename,
new_format.lower())
rem_filename = filename + REMOVE_EXT
if not Settings.test:
os.rename(filename, rem_filename)
os.rename(new_filename, final_filename)
os.remove(rem_filename)
else:
os.remove(new_filename)
else:
os.remove(new_filename)
bytes_out = bytes_in
except OSError as ex:
print(ex)
return final_filename, bytes_in, bytes_out
def HasTable(self, table_name):
"""Determines if a specific table exists.
Args:
table_name (str): table name.
Returns:
bool: True if the table exists.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError(
'Cannot determine if table exists database not opened.')
sql_query = self._HAS_TABLE_QUERY.format(table_name)
self._cursor.execute(sql_query)
if self._cursor.fetchone():
return True
return False
def helper_parallel_lines(start0, end0, start1, end1, filename):
"""Image for :func:`.parallel_lines_parameters` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
points = stack1d(start0, end0, start1, end1)
ax.plot(points[0, :2], points[1, :2], marker="o")
ax.plot(points[0, 2:], points[1, 2:], marker="o")
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(figure, filename)
def get_img_heatmap(orig_img, activation_map):
"""Draw a heatmap on top of the original image using intensities from activation_map"""
heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
img_heatmap = np.float32(heatmap) + np.float32(orig_img)
img_heatmap = img_heatmap / np.max(img_heatmap)
img_heatmap *= 255
return img_heatmap.astype(int)
def zoom_pinch_cb(self, fitsimage, event):
"""Pinch event in the pan window. Just zoom the channel viewer.
"""
chviewer = self.fv.getfocus_viewer()
bd = chviewer.get_bindings()
if hasattr(bd, 'pi_zoom'):
return bd.pi_zoom(chviewer, event)
return False
def format_t_into_dhms_format(timestamp):
""" Convert a number of seconds into days, hours, minutes and seconds
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s'
"""
mins, timestamp = divmod(timestamp, 60)
hour, mins = divmod(mins, 60)
day, hour = divmod(hour, 24)
return '%sd %sh %sm %ss' % (day, hour, mins, timestamp)
def finditer(self, string, pos=0, endpos=sys.maxint):
"""Return a list of all non-overlapping matches of pattern in string."""
scanner = self.scanner(string, pos, endpos)
return iter(scanner.search, None)
def open_file(path):
"""Opens Explorer/Finder with given path, depending on platform"""
if sys.platform=='win32':
os.startfile(path)
#subprocess.Popen(['start', path], shell= True)
elif sys.platform=='darwin':
subprocess.Popen(['open', path])
else:
try:
subprocess.Popen(['xdg-open', path])
except OSError:
pass
def commit(self, snapshot: Tuple[Hash32, UUID]) -> None:
"""
Commit the journal to the point where the snapshot was taken. This
will merge in any changesets that were recorded *after* the snapshot changeset.
"""
_, account_snapshot = snapshot
self._account_db.commit(account_snapshot)
def surface_constructor(surface):
"""Image for :class`.Surface` docstring."""
if NO_IMAGES:
return
ax = surface.plot(256, with_nodes=True)
line = ax.lines[0]
nodes = surface._nodes
add_patch(ax, nodes[:, (0, 1, 2, 5)], line.get_color())
delta = 1.0 / 32.0
ax.text(
nodes[0, 0],
nodes[1, 0],
r"$v_0$",
fontsize=20,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
nodes[0, 1],
nodes[1, 1],
r"$v_1$",
fontsize=20,
verticalalignment="top",
horizontalalignment="center",
)
ax.text(
nodes[0, 2],
nodes[1, 2],
r"$v_2$",
fontsize=20,
verticalalignment="top",
horizontalalignment="left",
)
ax.text(
nodes[0, 3] - delta,
nodes[1, 3],
r"$v_3$",
fontsize=20,
verticalalignment="center",
horizontalalignment="right",
)
ax.text(
nodes[0, 4] + delta,
nodes[1, 4],
r"$v_4$",
fontsize=20,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
nodes[0, 5],
nodes[1, 5] + delta,
r"$v_5$",
fontsize=20,
verticalalignment="bottom",
horizontalalignment="center",
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "surface_constructor.png")
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method="--")
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return []
def _is_bval_type_a(grouped_dicoms):
"""
Check if the bvals are stored in the first of 2 currently known ways for single frame dti
"""
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for group in grouped_dicoms:
if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
common.get_fl_value(group[0][bval_tag]) != 0:
return True
return False
def NR_plot(stream, NR_stream, detections, false_detections=False,
size=(18.5, 10), **kwargs):
"""
Plot Network response alongside the stream used.
Highlights detection times in the network response.
:type stream: obspy.core.stream.Stream
:param stream: Stream to plot
:type NR_stream: obspy.core.stream.Stream
:param NR_stream: Stream for the network response
:type detections: list
:param detections: List of the detection time as :class:`datetime.datetime`
:type false_detections: list
:param false_detections:
Either False (default) or list of false detection times
(:class:`datetime.datetime`).
:type size: tuple
:param size: Size of figure, default is (18.5, 10)
:returns: :class:`matplotlib.figure.Figure`
.. Note::
Called by :mod:`eqcorrscan.core.bright_lights`, not a general use
plot (hence no example)
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(len(stream) + 1, 1, sharex=True, figsize=size)
if len(stream) > 1:
axes = axes.ravel()
else:
return
mintime = stream.sort(['starttime'])[0].stats.starttime
stream.sort(['network', 'station', 'starttime'])
for i, tr in enumerate(stream):
delay = tr.stats.starttime - mintime
delay *= tr.stats.sampling_rate
y = tr.data
x = [tr.stats.starttime.datetime + dt.timedelta(
seconds=s / tr.stats.sampling_rate) for s in range(len(y))]
x = mdates.date2num(x)
axes[i].plot(x, y, 'k', linewidth=1.1)
axes[i].set_ylabel('.'.join([tr.stats.station, tr.stats.channel]),
rotation=0)
axes[i].yaxis.set_ticks([])
axes[i].set_xlim(x[0], x[-1])
# Plot the network response
tr = NR_stream[0]
delay = tr.stats.starttime - mintime
delay *= tr.stats.sampling_rate
y = tr.data
x = [tr.stats.starttime.datetime +
dt.timedelta(seconds=s / tr.stats.sampling_rate)
for s in range(len(y))]
x = mdates.date2num(x)
axes[-1].plot(x, y, 'k', linewidth=1.1)
axes[-1].set_ylabel('.'.join([tr.stats.station, tr.stats.channel]),
rotation=0)
axes[-1].yaxis.set_ticks([])
axes[-1].set_xlabel('Time')
axes[-1].set_xlim(x[0], x[-1])
# Plot the detections!
ymin, ymax = axes[-1].get_ylim()
if false_detections:
for detection in false_detections:
xd = mdates.date2num(detection)
axes[-1].plot((xd, xd), (ymin, ymax), 'k--', linewidth=0.9,
alpha=0.5)
for detection in detections:
xd = mdates.date2num(detection)
axes[-1].plot((xd, xd), (ymin, ymax), 'r--', linewidth=1.1)
# Set formatters for x-labels
mins = mdates.MinuteLocator()
timedif = tr.stats.endtime.datetime - tr.stats.starttime.datetime
if timedif.total_seconds() >= 10800 and timedif.total_seconds() <= 25200:
hours = mdates.MinuteLocator(byminute=[0, 15, 30, 45])
elif timedif.total_seconds() <= 1200:
hours = mdates.MinuteLocator(byminute=range(0, 60, 2))
elif timedif.total_seconds() > 25200 and timedif.total_seconds() <= 172800:
hours = mdates.HourLocator(byhour=range(0, 24, 3))
elif timedif.total_seconds() > 172800:
hours = mdates.DayLocator()
else:
hours = mdates.MinuteLocator(byminute=range(0, 60, 5))
hrFMT = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
axes[-1].xaxis.set_major_locator(hours)
axes[-1].xaxis.set_major_formatter(hrFMT)
axes[-1].xaxis.set_minor_locator(mins)
plt.gcf().autofmt_xdate()
axes[-1].fmt_xdata = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig
def weighted_median(d, w):
"""A utility function to find a median of d based on w
Parameters
----------
d : array
(n, 1), variable for which median will be found
w : array
(n, 1), variable on which d's median will be decided
Notes
-----
d and w are arranged in the same order
Returns
-------
float
median of d
Examples
--------
Creating an array including five integers.
We will get the median of these integers.
>>> d = np.array([5,4,3,1,2])
Creating another array including weight values for the above integers.
The median of d will be decided with a consideration to these weight
values.
>>> w = np.array([10, 22, 9, 2, 5])
Applying weighted_median function
>>> weighted_median(d, w)
4
"""
dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)]
d_w = np.array(list(zip(w, d)), dtype=dtype)
d_w.sort(order='v')
reordered_w = d_w['w'].cumsum()
cumsum_threshold = reordered_w[-1] * 1.0 / 2
median_inx = (reordered_w >= cumsum_threshold).nonzero()[0][0]
if reordered_w[median_inx] == cumsum_threshold and len(d) - 1 > median_inx:
return np.sort(d)[median_inx:median_inx + 2].mean()
return np.sort(d)[median_inx]
def parse_args(args):
"""Uses python argparse to collect positional args"""
Log.info("Input args: %r" % args)
parser = argparse.ArgumentParser()
parser.add_argument("--shard", type=int, required=True)
parser.add_argument("--topology-name", required=True)
parser.add_argument("--topology-id", required=True)
parser.add_argument("--topology-defn-file", required=True)
parser.add_argument("--state-manager-connection", required=True)
parser.add_argument("--state-manager-root", required=True)
parser.add_argument("--state-manager-config-file", required=True)
parser.add_argument("--tmaster-binary", required=True)
parser.add_argument("--stmgr-binary", required=True)
parser.add_argument("--metrics-manager-classpath", required=True)
parser.add_argument("--instance-jvm-opts", required=True)
parser.add_argument("--classpath", required=True)
parser.add_argument("--master-port", required=True)
parser.add_argument("--tmaster-controller-port", required=True)
parser.add_argument("--tmaster-stats-port", required=True)
parser.add_argument("--heron-internals-config-file", required=True)
parser.add_argument("--override-config-file", required=True)
parser.add_argument("--component-ram-map", required=True)
parser.add_argument("--component-jvm-opts", required=True)
parser.add_argument("--pkg-type", required=True)
parser.add_argument("--topology-binary-file", required=True)
parser.add_argument("--heron-java-home", required=True)
parser.add_argument("--shell-port", required=True)
parser.add_argument("--heron-shell-binary", required=True)
parser.add_argument("--metrics-manager-port", required=True)
parser.add_argument("--cluster", required=True)
parser.add_argument("--role", required=True)
parser.add_argument("--environment", required=True)
parser.add_argument("--instance-classpath", required=True)
parser.add_argument("--metrics-sinks-config-file", required=True)
parser.add_argument("--scheduler-classpath", required=True)
parser.add_argument("--scheduler-port", required=True)
parser.add_argument("--python-instance-binary", required=True)
parser.add_argument("--cpp-instance-binary", required=True)
parser.add_argument("--metricscache-manager-classpath", required=True)
parser.add_argument("--metricscache-manager-master-port", required=True)
parser.add_argument("--metricscache-manager-stats-port", required=True)
parser.add_argument("--metricscache-manager-mode", required=False)
parser.add_argument("--is-stateful", required=True)
parser.add_argument("--checkpoint-manager-classpath", required=True)
parser.add_argument("--checkpoint-manager-port", required=True)
parser.add_argument("--checkpoint-manager-ram", type=long, required=True)
parser.add_argument("--stateful-config-file", required=True)
parser.add_argument("--health-manager-mode", required=True)
parser.add_argument("--health-manager-classpath", required=True)
parser.add_argument("--jvm-remote-debugger-ports", required=False,
help="ports to be used by a remote debugger for JVM instances")
parsed_args, unknown_args = parser.parse_known_args(args[1:])
if unknown_args:
Log.error('Unknown argument: %s' % unknown_args[0])
parser.print_help()
sys.exit(1)
return parsed_args
def index():
"""
main function - outputs in following format BEFORE consolidation (which is TODO)
# filename, word, linenumbers
# refAction.csv, ActionTypeName, 1
# refAction.csv, PhysicalType, 1
# goals.csv, Cleanliness, 11
"""
lg = mod_log.Log(mod_cfg.fldrs['localPath'])
lg.record_command('Starting indexing', 'index.py') # sys.modules[self.__module__].__file__)
if silent == 'N':
print('------------------')
print('Rebuilding Indexes')
print('------------------')
with open(ndxFile, "w") as ndx:
ndx.write('filename, word, linenumbers\n')
files_to_index = mod_fl.FileList([mod_cfg.fldrs['public_data_path'] + os.sep + 'core'], ['*.csv'], ignore_files, "files_to_index_filelist.csv")
if silent == 'N':
print(format_op_hdr())
for f in files_to_index.get_list():
buildIndex(f, ndxFile, silent)
# now build the one big index file
consolidate(ndxFile, opIndex )
lg.record_command('Finished indexing', 'index.py') #, fle.GetModuleName())
if silent == 'N':
print('Done')
def factor_loadings(
r, factors=None, scale=False, pickle_from=None, pickle_to=None
):
"""Security factor exposures generated through OLS regression.
Incorporates a handful of well-known factors models.
Parameters
----------
r : Series or DataFrame
The left-hand-side variable(s). If `r` is nx1 shape (a Series or
single-column DataFrame), the result will be a DataFrame. If `r` is
an nxm DataFrame, the result will be a dictionary of DataFrames.
factors : DataFrame or None, default None
Factor returns (right-hand-side variables). If None, factor returns
are loaded from `pyfinance.datasets.load_factors`
scale : bool, default False
If True, cale up/down the volatilities of all factors besides MKT & RF,
to the vol of MKT. Both means and the standard deviations are
multiplied by the scale factor (ratio of MKT.std() to other stdevs)
pickle_from : str or None, default None
Passed to `pyfinance.datasets.load_factors` if factors is not None
pickle_to : str or None, default None
Passed to `pyfinance.datasets.load_factors` if factors is not None
Example
-------
# TODO
"""
# TODO:
# - Might be appropriate for `returns`, will require higher dimensionality
# - Option to subtract or not subtract RF (Jensen alpha)
# - Annualized alpha
# - Add variance inflation factor to output (method of `ols.OLS`)
# - Add 'missing=drop' functionality (see statsmodels.OLS)
# - Take all combinations of factors; which has highest explanatory power
# or lowest SSE/MSE?
if factors is None:
factors = datasets.load_factors(
pickle_from=pickle_from, pickle_to=pickle_to
)
r, factors = utils.constrain(r, factors)
if isinstance(r, pd.Series):
n = 1
r = r.subtract(factors["RF"])
elif isinstance(r, pd.DataFrame):
n = r.shape[1]
r = r.subtract(np.tile(factors["RF"], (n, 1)).T)
# r = r.subtract(factors['RF'].values.reshape(-1,1))
else:
raise ValueError("`r` must be one of (Series, DataFrame)")
if scale:
# Scale up the volatilities of all factors besides MKT & RF, to the
# vol of MKT. Both means and the standard deviations are multiplied
# by the scale factor (ratio of MKT.std() to other stdevs)
tgtvol = factors["MKT"].std()
diff = factors.columns.difference(["MKT", "RF"]) # don't scale these
vols = factors[diff].std()
factors.loc[:, diff] = factors[diff] * tgtvol / vols
# Right-hand-side dict of models
rhs = OrderedDict(
[
("Capital Asset Pricing Model (CAPM)", ["MKT"]),
("Fama-French 3-Factor Model", ["MKT", "SMB", "HML"]),
("Carhart 4-Factor Model", ["MKT", "SMB", "HMLD", "UMD"]),
(
"Fama-French 5-Factor Model",
["MKT", "SMB", "HMLD", "RMW", "CMA"],
),
(
"AQR 6-Factor Model",
["MKT", "SMB", "HMLD", "RMW", "CMA", "UMD"],
),
("Price-Signal Model", ["MKT", "UMD", "STR", "LTR"]),
(
"Fung-Hsieh Trend-Following Model",
["BDLB", "FXLB", "CMLB", "STLB", "SILB"],
),
]
)
# Get union of keys and sort them according to `factors.columns`' order;
# used later as columns in result
cols = set(itertools.chain(*rhs.values()))
cols = [o for o in factors.columns if o in cols] + ["alpha", "rsq_adj"]
# Empty DataFrame to be populated with each regression's attributes
stats = ["coef", "tstat"]
idx = pd.MultiIndex.from_product([rhs.keys(), stats])
res = pd.DataFrame(columns=cols, index=idx)
# Regression calls
if n > 1:
# Dict of DataFrames
d = {}
for col in r:
for k, v in rhs.items():
res = res.copy()
model = ols.OLS(y=r[col], x=factors[v], hasconst=False)
res.loc[(k, "coef"), factors[v].columns] = model.beta()
res.loc[(k, "tstat"), factors[v].columns] = model.tstat_beta()
res.loc[(k, "coef"), "alpha"] = model.alpha()
res.loc[(k, "tstat"), "alpha"] = model.tstat_alpha()
res.loc[(k, "coef"), "rsq_adj"] = model.rsq_adj()
d[col] = res
res = d
else:
# Single DataFrame
for k, v in rhs.items():
model = ols.OLS(y=r, x=factors[v], hasconst=False)
res.loc[(k, "coef"), factors[v].columns] = model.beta()
res.loc[(k, "tstat"), factors[v].columns] = model.tstat_beta()
res.loc[(k, "coef"), "alpha"] = model.alpha()
res.loc[(k, "tstat"), "alpha"] = model.tstat_alpha()
res.loc[(k, "coef"), "rsq_adj"] = model.rsq_adj()
return res
def fetch_items(self, category, **kwargs):
"""Fetch the issues
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
logger.info("Looking for issues at site '%s', in project '%s' and updated from '%s'",
self.url, self.project, str(from_date))
whole_pages = self.client.get_issues(from_date)
fields = json.loads(self.client.get_fields())
custom_fields = filter_custom_fields(fields)
for whole_page in whole_pages:
issues = self.parse_issues(whole_page)
for issue in issues:
mapping = map_custom_field(custom_fields, issue['fields'])
for k, v in mapping.items():
issue['fields'][k] = v
comments_data = self.__get_issue_comments(issue['id'])
issue['comments_data'] = comments_data
yield issue
def copy(self, origTypeID, newTypeID):
"""copy(string, string) -> None
Duplicates the vType with ID origTypeID. The newly created vType is assigned the ID newTypeID
"""
self._connection._sendStringCmd(
tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.COPY, origTypeID, newTypeID)
def update_fw_local_cache(self, net, direc, start):
"""Update the fw dict with Net ID and service IP. """
fw_dict = self.get_fw_dict()
if direc == 'in':
fw_dict.update({'in_network_id': net, 'in_service_ip': start})
else:
fw_dict.update({'out_network_id': net, 'out_service_ip': start})
self.update_fw_dict(fw_dict)
def _get_help_names(self):
"""Return a mapping of help topic name to `.help_*()` method."""
# Determine the additional help topics, if any.
help_names = {}
token2cmdname = self._get_canonical_map()
for attrname, attr in self._gen_names_and_attrs():
if not attrname.startswith("help_"): continue
help_name = attrname[5:]
if help_name not in token2cmdname:
help_names[help_name] = attr
return help_names
def file_system_service(self):
"""Property providing access to the :class:`.FileSystemServiceAPI`"""
if self._fss_api is None:
self._fss_api = self.get_fss_api()
return self._fss_api
def abfSort(IDs):
"""
given a list of goofy ABF names, return it sorted intelligently.
This places things like 16o01001 after 16901001.
"""
IDs=list(IDs)
monO=[]
monN=[]
monD=[]
good=[]
for ID in IDs:
if ID is None:
continue
if 'o' in ID:
monO.append(ID)
elif 'n' in ID:
monN.append(ID)
elif 'd' in ID:
monD.append(ID)
else:
good.append(ID)
return sorted(good)+sorted(monO)+sorted(monN)+sorted(monD)
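# Quick illustrative check of the ordering rule described in the docstring:
# IDs containing 'o' sort after plain numeric IDs.
print(abfSort(['16o01001', '16901001']))   # -> ['16901001', '16o01001']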
def build_command(command, parameter_map):
"""
Build command line(s) using the given parameter map.
Even if passed a single `command`, this function will return a list
of shell commands. It is the caller's responsibility to concatenate them,
likely using the semicolon or double ampersands.
:param command: The command to interpolate params into.
:type command: str|list[str]
:param parameter_map: A ParameterMap object containing parameter knowledge.
:type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap
:return: list of commands
:rtype: list[str]
"""
if isinstance(parameter_map, list): # Partially emulate old (pre-0.7) API for this function.
parameter_map = LegacyParameterMap(parameter_map)
out_commands = []
for command in listify(command):
# Only attempt formatting if the string smells like it should be formatted.
# This allows the user to include shell syntax in the commands, if required.
# (There's still naturally the chance for false-positives, so guard against
# those value errors and warn about them.)
if interpolable_re.search(command):
try:
command = interpolable_re.sub(
lambda match: _replace_interpolation(parameter_map, match),
command,
)
except ValueError as exc: # pragma: no cover
warnings.warn(
'failed to interpolate into %r: %s' % (command, exc),
CommandInterpolationWarning
)
out_commands.append(command.strip())
return out_commands
def base64url_decode(msg):
"""
Decode a base64 message based on JWT spec, Appendix B.
"Notes on implementing base64url encoding without padding"
"""
rem = len(msg) % 4
if rem:
msg += b'=' * (4 - rem)
return base64.urlsafe_b64decode(msg)
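# Quick sanity check of the padding handling (illustrative; assumes the function
# above is in scope): 'aGVsbG8' is the unpadded base64url encoding of b'hello'.
assert base64url_decode(b'aGVsbG8') == b'hello'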
def ucas_download_playlist(url, output_dir = '.', merge = False, info_only = False, **kwargs):
'''course page'''
html = get_content(url)
parts = re.findall( r'(getplaytitle.do\?.+)"', html)
assert parts, 'No part found!'
for part_path in parts:
ucas_download('http://v.ucas.ac.cn/course/' + part_path, output_dir=output_dir, merge=merge, info_only=info_only)
def namedb_name_import_sanity_check( cur, opcode, op_data, history_id, block_id, vtxindex, prior_import, record_table):
"""
Sanity checks on a name-import:
* the opcode must match the op_data
* everything must match the record table.
* if prior_import is None, then the name shouldn't exist
* if prior_import is not None, then it must exist
Return True on success
Raise an exception on error.
DO NOT CALL THIS METHOD DIRECTLY.
"""
assert opcode in OPCODE_NAME_STATE_IMPORTS, "BUG: opcode '%s' does not import a name" % (opcode)
assert record_table == "name_records", "BUG: wrong table %s" % record_table
assert namedb_is_history_snapshot( op_data ), "BUG: import is incomplete"
namedb_op_sanity_check( opcode, op_data, op_data )
# must be the only such existant name, if prior_import is None
name_rec = namedb_get_name( cur, history_id, block_id )
if prior_import is None:
assert name_rec is None, "BUG: trying to import '%s' for the first time, again" % history_id
else:
assert name_rec is not None, "BUG: trying to overwrite non-existent import '%s'" % history_id
assert prior_import['name'] == history_id, "BUG: trying to overwrite import for different name '%s'" % history_id
# must actually be prior
assert prior_import['block_number'] < block_id or (prior_import['block_number'] == block_id and prior_import['vtxindex'] < vtxindex), \
"BUG: prior_import comes after op_data"
return True
def serial_starfeatures(lclist,
outdir,
lc_catalog_pickle,
neighbor_radius_arcsec,
maxobjects=None,
deredden=True,
custom_bandpasses=None,
lcformat='hat-sql',
lcformatdir=None):
'''This drives the `get_starfeatures` function for a collection of LCs.
Parameters
----------
lclist : list of str
The list of light curve file names to process.
outdir : str
The output directory where the results will be placed.
lc_catalog_pickle : str
The path to a catalog containing at a dict with least:
- an object ID array accessible with `dict['objects']['objectid']`
- an LC filename array accessible with `dict['objects']['lcfname']`
- a `scipy.spatial.KDTree` or `cKDTree` object to use for finding
neighbors for each object accessible with `dict['kdtree']`
A catalog pickle of the form needed can be produced using
:py:func:`astrobase.lcproc.catalogs.make_lclist` or
:py:func:`astrobase.lcproc.catalogs.filter_lclist`.
neighbor_radius_arcsec : float
This indicates the radius in arcsec to search for neighbors for this
object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,
and in GAIA.
maxobjects : int
The number of objects to process from `lclist`.
deredden : bool
This controls if the colors and any color classifications will be
dereddened using 2MASS DUST.
custom_bandpasses : dict or None
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. this list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the the first item is the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
list of str
A list of all star features pickles produced.
'''
# make sure to make the output directory if it doesn't exist
if not os.path.exists(outdir):
os.makedirs(outdir)
if maxobjects:
lclist = lclist[:maxobjects]
# read in the kdtree pickle
with open(lc_catalog_pickle, 'rb') as infd:
kdt_dict = pickle.load(infd)
kdt = kdt_dict['kdtree']
objlist = kdt_dict['objects']['objectid']
objlcfl = kdt_dict['objects']['lcfname']
tasks = [(x, outdir, kdt, objlist, objlcfl,
neighbor_radius_arcsec,
deredden, custom_bandpasses,
lcformat, lcformatdir) for x in lclist]
for task in tqdm(tasks):
result = _starfeatures_worker(task)
return result
def _ParseNamesString(self, names_string):
"""Parses the name string.
Args:
names_string (str): comma separated filenames to filter.
"""
if not names_string:
return
names_string = names_string.lower()
names = [name.strip() for name in names_string.split(',')]
file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)
self._filter_collection.AddFilter(file_entry_filter)
def set_return_listener(self, cb):
'''
Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
later.
:param cb: callable cb(Message); pass None to reset
'''
if cb is not None and not callable(cb):
raise ValueError('return_listener callback must either be None or '
'a callable, but got: %r' % (cb,))
self._return_listener = cb
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def get_ancestors(self):
"""
:returns: A queryset containing the current node object's ancestors,
starting by the root node and descending to the parent.
"""
if self.is_root():
return get_result_class(self.__class__).objects.none()
return get_result_class(self.__class__).objects.filter(
tree_id=self.tree_id,
lft__lt=self.lft,
rgt__gt=self.rgt)
def drain(self, sid=None):
"""
Drain will put a connection into a drain state. All subscriptions will
immediately be put into a drain state. Upon completion, the publishers
will be drained and can not publish any additional messages. Upon draining
of the publishers, the connection will be closed. Use the `closed_cb'
option to know when the connection has moved from draining to closed.
If a sid is passed, just the subscription with that sid will be drained
without closing the connection.
"""
if self.is_draining:
return
if self.is_closed:
raise ErrConnectionClosed
if self.is_connecting or self.is_reconnecting:
raise ErrConnectionReconnecting
if sid is not None:
return self._drain_sub(sid)
# Start draining the subscriptions
self._status = Client.DRAINING_SUBS
drain_tasks = []
for ssid, sub in self._subs.items():
task = self._drain_sub(ssid)
drain_tasks.append(task)
drain_is_done = asyncio.gather(*drain_tasks)
try:
yield from asyncio.wait_for(drain_is_done, self.options["drain_timeout"])
except asyncio.TimeoutError:
drain_is_done.exception()
drain_is_done.cancel()
if self._error_cb is not None:
yield from self._error_cb(ErrDrainTimeout)
except asyncio.CancelledError:
pass
finally:
self._status = Client.DRAINING_PUBS
yield from self.flush()
yield from self._close(Client.CLOSED) | 0.002413 |
def update(self, id, body):
"""Modifies a connection.
Args:
id: Id of the connection.
body (dict): Specifies which fields are to be modified, and to what
values.
See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id
Returns:
The modified connection object.
"""
return self.client.patch(self._url(id), data=body) | 0.006726 |
def which(program):
'''
Emulate unix 'which' command. If program is a path to an executable file
(i.e. it contains any directory components, like './myscript'), return
program. Otherwise, if an executable file matching program is found in one
of the directories in the PATH environment variable, return the first match
found.
On Windows, if PATHEXT is defined and program does not include an
extension, include the extensions in PATHEXT when searching for a matching
executable file.
Return None if no executable file is found.
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
https://github.com/webcoyote/vagrant/blob/f70507062e3b30c00db1f0d8b90f9245c4c997d4/lib/vagrant/util/file_util.rb
Python3.3+ implementation:
https://hg.python.org/cpython/file/default/Lib/shutil.py
'''
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
# Shortcut: If program contains any dir components, do not search the path
# e.g. './backup', '/bin/ls'
if os.path.dirname(program):
if is_exe(program):
return program
else:
return None
# Are we on windows?
# http://stackoverflow.com/questions/1325581/how-do-i-check-if-im-running-on-windows-in-python
windows = (os.name == 'nt')
# Or cygwin?
# https://docs.python.org/2/library/sys.html#sys.platform
cygwin = sys.platform.startswith('cygwin')
# Paths: a list of directories
path_str = os.environ.get('PATH', os.defpath)
if not path_str:
paths = []
else:
paths = path_str.split(os.pathsep)
# The current directory takes precedence on Windows.
if windows:
paths.insert(0, os.curdir)
# Only search PATH if there is one to search.
if not paths:
return None
# Files: add any necessary extensions to program
# On cygwin and non-windows systems do not add extensions when searching
# for the executable
if cygwin or not windows:
files = [program]
else:
# windows path extensions in PATHEXT.
# e.g. ['.EXE', '.CMD', '.BAT']
# http://environmentvariables.org/PathExt
# This might not properly use extensions that have been "registered" in
# Windows. In the future it might make sense to use one of the many
# "which" packages on PyPI.
exts = os.environ.get('PATHEXT', '').split(os.pathsep)
# if the program ends with one of the extensions, only test that one.
# otherwise test all the extensions.
matching_exts = [ext for ext in exts if
program.lower().endswith(ext.lower())]
if matching_exts:
files = [program + ext for ext in matching_exts]
else:
files = [program + ext for ext in exts]
# Check each combination of path, program, and extension, returning
# the first combination that exists and is executable.
for path in paths:
for f in files:
fpath = os.path.normcase(os.path.join(path, f))
if is_exe(fpath):
return fpath
return None | 0.000313 |
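A quick, hedged illustration of the helper above, assuming which() is in scope; actual results depend on the host's PATH, so the outputs in the comments are only indicative.
print(which('python3'))            # e.g. '/usr/bin/python3' on a typical Linux box, or None
print(which('./deploy.sh'))        # returned as-is only if the file exists and is executable
print(which('no-such-binary'))     # None when nothing on PATH matches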
def _build(self, images):
"""Build dilation module.
Args:
images: Tensor of shape [batch_size, height, width, depth]
and dtype float32. Represents a set of images with an arbitrary depth.
Note that when using the default initializer, depth must equal
num_output_classes.
Returns:
Tensor of shape [batch_size, height, width, num_output_classes] and dtype
float32. Represents, for each image and pixel, logits for per-class
predictions.
Raises:
IncompatibleShapeError: If images is not rank 4.
ValueError: If model_size is not one of 'basic' or 'large'.
"""
num_classes = self._num_output_classes
if len(images.get_shape()) != 4:
raise base.IncompatibleShapeError(
"'images' must have shape [batch_size, height, width, depth].")
if self.WEIGHTS not in self._initializers:
if self._model_size == self.BASIC:
self._initializers[self.WEIGHTS] = identity_kernel_initializer
elif self._model_size == self.LARGE:
self._initializers[self.WEIGHTS] = noisy_identity_kernel_initializer(
num_classes)
else:
raise ValueError("Unrecognized model_size: %s" % self._model_size)
if self.BIASES not in self._initializers:
self._initializers[self.BIASES] = tf.zeros_initializer()
if self._model_size == self.BASIC:
self._conv_modules = [
self._dilated_conv_layer(num_classes, 1, True, "conv1"),
self._dilated_conv_layer(num_classes, 1, True, "conv2"),
self._dilated_conv_layer(num_classes, 2, True, "conv3"),
self._dilated_conv_layer(num_classes, 4, True, "conv4"),
self._dilated_conv_layer(num_classes, 8, True, "conv5"),
self._dilated_conv_layer(num_classes, 16, True, "conv6"),
self._dilated_conv_layer(num_classes, 1, True, "conv7"),
self._dilated_conv_layer(num_classes, 1, False, "conv8"),
]
elif self._model_size == self.LARGE:
self._conv_modules = [
self._dilated_conv_layer(2 * num_classes, 1, True, "conv1"),
self._dilated_conv_layer(2 * num_classes, 1, True, "conv2"),
self._dilated_conv_layer(4 * num_classes, 2, True, "conv3"),
self._dilated_conv_layer(8 * num_classes, 4, True, "conv4"),
self._dilated_conv_layer(16 * num_classes, 8, True, "conv5"),
self._dilated_conv_layer(32 * num_classes, 16, True, "conv6"),
self._dilated_conv_layer(32 * num_classes, 1, True, "conv7"),
self._dilated_conv_layer(num_classes, 1, False, "conv8"),
]
else:
raise ValueError("Unrecognized model_size: %s" % self._model_size)
dilation_mod = sequential.Sequential(self._conv_modules, name="dilation")
return dilation_mod(images) | 0.003218 |
def pipe(value, *functions, funcs=None):
"""pipe(value, f, g, h) == h(g(f(value)))"""
if funcs:
functions = funcs
for function in functions:
value = function(value)
return value | 0.004785 |
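A short usage sketch, assuming the pipe() helper above is in scope.
def double(x):
    return x * 2

def increment(x):
    return x + 1

assert pipe(3, double, increment) == increment(double(3)) == 7
assert pipe(3, funcs=[double, increment]) == 7  # explicit funcs= overrides *functions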
def revfile_path(self):
"""
:return: The full path of revision file.
:rtype: str
"""
return os.path.normpath(os.path.join(
os.getcwd(),
self.config.revision_file
)) | 0.008475 |
def generate_hash_id(node):
"""
Generates a hash_id for the node in question.
:param node: lxml etree node
"""
try:
content = tostring(node)
except Exception:
logger.exception("Generating of hash failed")
content = to_bytes(repr(node))
hash_id = md5(content).hexdigest()
return hash_id[:8] | 0.002882 |
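A hedged usage sketch of generate_hash_id(); it assumes lxml is installed and that the tostring, to_bytes, md5 and logger names used by the snippet are already imported in the same module.
from lxml.etree import fromstring

node = fromstring('<p class="lead">Hello</p>')
print(generate_hash_id(node))  # stable 8-character hex id for identical markup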
def subwave(wave, dep_name=None, indep_min=None, indep_max=None, indep_step=None):
r"""
Return a waveform that is a sub-set of a waveform, potentially re-sampled.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param dep_name: Independent variable name
:type dep_name: `NonNullString <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#nonnullstring>`_
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param indep_step: Independent vector step
:type indep_step: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.subwave
:raises:
* RuntimeError (Argument \`dep_name\` is not valid)
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`indep_step\` is greater than independent
vector range)
* RuntimeError (Argument \`indep_step\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
if dep_name is not None:
ret.dep_name = dep_name
_bound_waveform(ret, indep_min, indep_max)
pexdoc.addai("indep_step", bool((indep_step is not None) and (indep_step <= 0)))
exmsg = "Argument `indep_step` is greater than independent vector range"
cond = bool(
(indep_step is not None)
and (indep_step > ret._indep_vector[-1] - ret._indep_vector[0])
)
pexdoc.addex(RuntimeError, exmsg, cond)
if indep_step:
indep_vector = _barange(indep_min, indep_max, indep_step)
dep_vector = _interp_dep_vector(ret, indep_vector)
ret._set_indep_vector(indep_vector, check=False)
ret._set_dep_vector(dep_vector, check=False)
return ret | 0.001381 |
def read_long_description(readme_file):
""" Read package long description from README file """
try:
import pypandoc
except (ImportError, OSError) as exception:
print('No pypandoc or pandoc: %s' % (exception,))
if sys.version_info.major == 3:
handle = open(readme_file, encoding='utf-8')
else:
handle = open(readme_file)
long_description = handle.read()
handle.close()
return long_description
else:
return pypandoc.convert(readme_file, 'rst') | 0.001835 |
def _concat(self, other):
"""
Concatenate this with other to one wider value/signal
"""
w = self._dtype.bit_length()
try:
other_bit_length = other._dtype.bit_length
except AttributeError:
raise TypeError("Can not concat bits and", other._dtype)
other_w = other_bit_length()
resWidth = w + other_w
resT = Bits(resWidth)
if areValues(self, other):
return self._concat__val(other)
else:
w = self._dtype.bit_length()
other_w = other._dtype.bit_length()
resWidth = w + other_w
resT = Bits(resWidth)
# is instance of signal
if isinstance(other, InterfaceBase):
other = other._sig
if isinstance(other._dtype, Bits):
if other._dtype.signed is not None:
other = other._vec()
elif other._dtype == BOOL:
other = other._auto_cast(BIT)
else:
raise TypeError(other._dtype)
if self._dtype.signed is not None:
self = self._vec()
return Operator.withRes(AllOps.CONCAT, [self, other], resT)\
._auto_cast(Bits(resWidth,
signed=self._dtype.signed)) | 0.001472 |
def bans_list(self, limit=None, max_id=None, since_id=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/bans#get-all-bans"
api_path = "/api/v2/bans"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if limit:
api_query.update({
"limit": limit,
})
if max_id:
api_query.update({
"max_id": max_id,
})
if since_id:
api_query.update({
"since_id": since_id,
})
return self.call(api_path, query=api_query, **kwargs) | 0.002937 |
def format_container_name(name, special_characters=None):
'''format_container_name will take a name supplied by the user,
remove all special characters (except for those defined by "special-characters"
and return the new image name.
'''
if special_characters is None:
special_characters = []
return ''.join(e.lower()
for e in name if e.isalnum() or e in special_characters) | 0.004717 |
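Expected behavior of the helper above, for illustration (assumes format_container_name is importable):
print(format_container_name('My_Container:v1.0'))
# -> 'mycontainerv10'   (lowercased, non-alphanumerics dropped)
print(format_container_name('My_Container:v1.0', special_characters=['_', '.']))
# -> 'my_containerv1.0' (whitelisted '_' and '.' survive, ':' is still dropped)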
def url(section="postGIS", config_file=None):
""" Retrieve the URL used to connect to the database.
Use this if you have your own means of accessing the database and do not
want to use :func:`engine` or :func:`connection`.
Parameters
----------
section : str, optional
The `config.ini` section corresponding to the targeted database.
        It should contain all the details needed to set up a connection.
    config_file : str, optional
        Relative or absolute path of config.ini. If not specified, it tries to
        read from .oemof/config.ini in your HOME dir.
    Returns
    -------
    database URL : str
        The URL with which one can connect to the database. Be careful as this
        will probably contain sensitive data like the username/password
        combination.
Notes
-----
For documentation on config.ini see the README section on
:ref:`configuring <readme#configuration>` :mod:`oemof.db`.
"""
cfg.load_config(config_file)
try:
pw = keyring.get_password(cfg.get(section, "database"),
cfg.get(section, "username"))
except NoSectionError as e:
print("There is no section {section} in your config file. Please "
"choose one available section from your config file or "
"specify a new one!".format(
section=section))
exit(-1)
if pw is None:
try:
pw = cfg.get(section, "pw")
        except NoOptionError:  # no "pw" option in this section (assumes NoOptionError is imported alongside NoSectionError)
pw = getpass.getpass(prompt="No password available in your "\
"keyring for database {database}. "
"\n\nEnter your password to " \
"store it in "
"keyring:".format(database=section))
keyring.set_password(section, cfg.get(section, "username"), pw)
except NoSectionError:
print("Unable to find the 'postGIS' section in oemof's config." +
"\nExiting.")
exit(-1)
return "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format(
user=cfg.get(section, "username"),
passwd=pw,
host=cfg.get(section, "host"),
db=cfg.get(section, "database"),
port=int(cfg.get(section, "port"))) | 0.002089 |
def read_dir(directory):
'''Returns the text of all files in a directory.'''
content = dir_list(directory)
text = ''
for filename in content:
text += read_file(directory + '/' + filename)
text += ' '
return text | 0.004049 |
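The snippet depends on dir_list() and read_file() helpers that are not shown; a self-contained sketch of the same idea using only the standard library:
from pathlib import Path

def read_dir_sketch(directory):
    '''Concatenate the text of every regular file in a directory.'''
    text = ''
    for path in sorted(Path(directory).iterdir()):
        if path.is_file():
            text += path.read_text() + ' '
    return text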
def _update_dict(data, default_data, replace_data=False):
'''Update algorithm definition type dictionaries'''
if not data:
data = default_data.copy()
return data
if not isinstance(data, dict):
raise TypeError('Value not dict type')
if len(data) > 255:
raise ValueError('More than 255 values defined')
for i in data.keys():
if not isinstance(i, int):
raise TypeError('Index not int type')
if i < 0 or i > 255:
raise ValueError('Index value out of range')
if not replace_data:
data.update(default_data)
return data | 0.002907 |
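How the merge behaves in practice, assuming the function above is importable:
defaults = {0: 'reserved', 1: 'standard'}
print(_update_dict(None, defaults))             # -> copy of defaults
print(_update_dict({2: 'custom'}, defaults))    # -> {2: 'custom', 0: 'reserved', 1: 'standard'}
print(_update_dict({1: 'mine'}, defaults, replace_data=True))  # -> {1: 'mine'}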
def dump_stack_peek(data, separator = ' ', width = 16, arch = None):
"""
Dump data from pointers guessed within the given stack dump.
@type data: str
@param data: Dictionary mapping stack offsets to the data they point to.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each character.
@type width: int
@param width:
(Optional) Maximum number of characters to convert per text line.
This value is also used for padding.
@type arch: str
@param arch: Architecture of the machine whose registers were dumped.
Defaults to the current architecture.
@rtype: str
@return: Text suitable for logging.
"""
if data is None:
return ''
if arch is None:
arch = win32.arch
pointers = compat.keys(data)
pointers.sort()
result = ''
if pointers:
if arch == win32.ARCH_I386:
spreg = 'esp'
elif arch == win32.ARCH_AMD64:
spreg = 'rsp'
else:
spreg = 'STACK' # just a generic tag
tag_fmt = '[%s+0x%%.%dx]' % (spreg, len( '%x' % pointers[-1] ) )
for offset in pointers:
dumped = HexDump.hexline(data[offset], separator, width)
tag = tag_fmt % offset
result += '%s -> %s\n' % (tag, dumped)
return result | 0.00983 |
def write_config(self, cfg, slot=1):
""" Write a configuration to the YubiKey. """
cfg_req_ver = cfg.version_required()
if cfg_req_ver > self.version_num():
raise yubikey_base.YubiKeyVersionError('Configuration requires YubiKey version %i.%i (this is %s)' % \
(cfg_req_ver[0], cfg_req_ver[1], self.version()))
if not self.capabilities.have_configuration_slot(slot):
raise YubiKeyUSBHIDError("Can't write configuration to slot %i" % (slot))
return self._device._write_config(cfg, slot) | 0.011706 |
def dict_array_bytes(ary, template):
"""
Return the number of bytes required by an array
Arguments
---------------
ary : dict
Dictionary representation of an array
template : dict
A dictionary of key-values, used to replace any
string values in the array with concrete integral
values
Returns
-----------
The number of bytes required to represent
the array.
"""
shape = shape_from_str_tuple(ary['shape'], template)
dtype = dtype_from_str(ary['dtype'], template)
return array_bytes(shape, dtype) | 0.001709 |
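The helpers it calls (shape_from_str_tuple, dtype_from_str, array_bytes) are not shown; below is a hedged, self-contained numpy sketch of the same computation, with made-up array and template values.
import numpy as np

ary = {'shape': ('ntime', 'nchan', 4), 'dtype': 'complex64'}
template = {'ntime': 10, 'nchan': 64}

# Resolve symbolic dimensions against the template, then multiply the
# element count by the dtype's per-element size.
shape = tuple(template.get(dim, dim) for dim in ary['shape'])
nbytes = int(np.prod(shape)) * np.dtype(ary['dtype']).itemsize
print(shape, nbytes)  # (10, 64, 4) 20480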
def load_file(self, file_name):
"""
Find and return the template with the given file name.
Arguments:
file_name: the file name of the template.
"""
locator = self._make_locator()
path = locator.find_file(file_name, self.search_dirs)
return self.read(path) | 0.006135 |
def speakerDiarizationEvaluateScript(folder_name, ldas):
'''
This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folder_name, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wav_file in wavFilesList:
        gt_file = wav_file.replace('.wav', '.segments')
if os.path.isfile(gt_file):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
N.append(len(list(set(seg_labs))))
else:
N.append(-1)
for l in ldas:
print("LDA = {0:d}".format(l))
for i, wav_file in enumerate(wavFilesList):
speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False)
        print("") | 0.007943 |
def cornerplot(results, span=None, quantiles=[0.025, 0.5, 0.975],
color='black', smooth=0.02, hist_kwargs=None,
hist2d_kwargs=None, labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color='red', truth_kwargs=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
verbose=False, fig=None):
"""
Generate a corner plot of the 1-D and 2-D marginalized posteriors.
Parameters
----------
results : :class:`~dynesty.results.Results` instance
A :class:`~dynesty.results.Results` instance from a nested
sampling run. **Compatible with results derived from**
`nestle <http://kylebarbary.com/nestle/>`_.
span : iterable with shape (ndim,), optional
A list where each element is either a length-2 tuple containing
lower and upper bounds or a float from `(0., 1.]` giving the
fraction of (weighted) samples to include. If a fraction is provided,
the bounds are chosen to be equal-tailed. An example would be::
span = [(0., 10.), 0.95, (5., 6.)]
Default is `0.999999426697` (5-sigma credible interval).
quantiles : iterable, optional
A list of fractional quantiles to overplot on the 1-D marginalized
posteriors as vertical dashed lines. Default is `[0.025, 0.5, 0.975]`
(spanning the 95%/2-sigma credible interval).
color : str or iterable with shape (ndim,), optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting the histograms.
Default is `'black'`.
smooth : float or iterable with shape (ndim,), optional
The standard deviation (either a single value or a different value for
each subplot) for the Gaussian kernel used to smooth the 1-D and 2-D
marginalized posteriors, expressed as a fraction of the span.
Default is `0.02` (2% smoothing). If an integer is provided instead,
this will instead default to a simple (weighted) histogram with
`bins=smooth`.
hist_kwargs : dict, optional
Extra keyword arguments to send to the 1-D (smoothed) histograms.
hist2d_kwargs : dict, optional
Extra keyword arguments to send to the 2-D (smoothed) histograms.
labels : iterable with shape (ndim,), optional
A list of names for each parameter. If not provided, the default name
used when plotting will follow :math:`x_i` style.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
show_titles : bool, optional
Whether to display a title above each 1-D marginalized posterior
showing the 0.5 quantile along with the upper/lower bounds associated
with the 0.025 and 0.975 (95%/2-sigma credible interval) quantiles.
        Default is `False`.
title_fmt : str, optional
The format string for the quantiles provided in the title. Default is
`'.2f'`.
title_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_title` command.
truths : iterable with shape (ndim,), optional
A list of reference values that will be overplotted on the traces and
marginalized 1-D posteriors as solid horizontal/vertical lines.
Individual values can be exempt using `None`. Default is `None`.
truth_color : str or iterable with shape (ndim,), optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting `truths`.
Default is `'red'`.
truth_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the vertical
and horizontal lines with `truths`.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
top_ticks : bool, optional
Whether to label the top (rather than bottom) ticks. Default is
`False`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
verbose : bool, optional
Whether to print the values of the computed quantiles associated with
each parameter. Default is `False`.
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the traces and marginalized 1-D posteriors
onto the provided figure. Otherwise, by default an
internal figure is generated.
Returns
-------
cornerplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output corner plot.
"""
# Initialize values.
if quantiles is None:
quantiles = []
if truth_kwargs is None:
truth_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
if title_kwargs is None:
title_kwargs = dict()
if hist_kwargs is None:
hist_kwargs = dict()
if hist2d_kwargs is None:
hist2d_kwargs = dict()
# Set defaults.
hist_kwargs['alpha'] = hist_kwargs.get('alpha', 0.6)
hist2d_kwargs['alpha'] = hist2d_kwargs.get('alpha', 0.6)
truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)
# Extract weighted samples.
samples = results['samples']
try:
weights = np.exp(results['logwt'] - results['logz'][-1])
except:
weights = results['weights']
# Deal with 1D results. A number of extra catches are also here
# in case users are trying to plot other results besides the `Results`
# instance generated by `dynesty`.
samples = np.atleast_1d(samples)
if len(samples.shape) == 1:
samples = np.atleast_2d(samples)
else:
assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
samples = samples.T
assert samples.shape[0] <= samples.shape[1], "There are more " \
"dimensions than samples!"
ndim, nsamps = samples.shape
# Check weights.
if weights.ndim != 1:
raise ValueError("Weights must be 1-D.")
if nsamps != weights.shape[0]:
raise ValueError("The number of weights and samples disagree!")
# Determine plotting bounds.
if span is None:
span = [0.999999426697 for i in range(ndim)]
span = list(span)
if len(span) != ndim:
raise ValueError("Dimension mismatch between samples and span.")
for i, _ in enumerate(span):
try:
xmin, xmax = span[i]
except:
q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
span[i] = _quantile(samples[i], q, weights=weights)
# Set labels
if labels is None:
labels = [r"$x_{"+str(i+1)+"}$" for i in range(ndim)]
# Setting up smoothing.
if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
smooth = [smooth for i in range(ndim)]
# Setup axis layout (from `corner.py`).
factor = 2.0 # size of side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # size of width/height margin
plotdim = factor * ndim + factor * (ndim - 1.) * whspace # plot size
dim = lbdim + plotdim + trdim # total size
# Initialize figure.
if fig is None:
fig, axes = pl.subplots(ndim, ndim, figsize=(dim, dim))
else:
try:
fig, axes = fig
axes = np.array(axes).reshape((ndim, ndim))
except:
raise ValueError("Mismatch between axes and dimension.")
# Format figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Plotting.
for i, x in enumerate(samples):
if np.shape(samples)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the 1-D marginalized posteriors.
# Setup axes
ax.set_xlim(span[i])
if max_n_ticks == 0:
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
else:
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
ax.yaxis.set_major_locator(NullLocator())
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
ax.xaxis.set_major_formatter(sf)
if i < ndim - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# Generate distribution.
sx = smooth[i]
if isinstance(sx, int_type):
# If `sx` is an integer, plot a weighted histogram with
# `sx` bins within the provided bounds.
n, b, _ = ax.hist(x, bins=sx, weights=weights, color=color,
range=np.sort(span[i]), **hist_kwargs)
else:
# If `sx` is a float, oversample the data relative to the
# smoothing filter by a factor of 10, then use a Gaussian
# filter to smooth the results.
bins = int(round(10. / sx))
n, b = np.histogram(x, bins=bins, weights=weights,
range=np.sort(span[i]))
n = norm_kde(n, 10.)
b0 = 0.5 * (b[1:] + b[:-1])
n, b, _ = ax.hist(b0, bins=b, weights=n,
range=np.sort(span[i]), color=color,
**hist_kwargs)
ax.set_ylim([0., max(n) * 1.05])
# Plot quantiles.
if quantiles is not None and len(quantiles) > 0:
qs = _quantile(x, quantiles, weights=weights)
for q in qs:
ax.axvline(q, lw=2, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print(labels[i], [blob for blob in zip(quantiles, qs)])
# Add truth value(s).
if truths is not None and truths[i] is not None:
try:
[ax.axvline(t, color=truth_color, **truth_kwargs)
for t in truths[i]]
except:
ax.axvline(truths[i], color=truth_color, **truth_kwargs)
# Set titles.
if show_titles:
title = None
if title_fmt is not None:
ql, qm, qh = _quantile(x, [0.025, 0.5, 0.975], weights=weights)
q_minus, q_plus = qm - ql, qh - qm
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
title = "{0} = {1}".format(labels[i], title)
ax.set_title(title, **title_kwargs)
for j, y in enumerate(samples):
if np.shape(samples)[0] == 1:
ax = axes
else:
ax = axes[i, j]
# Plot the 2-D marginalized posteriors.
# Setup axes.
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
if max_n_ticks == 0:
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
else:
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
ax.xaxis.set_major_formatter(sf)
ax.yaxis.set_major_formatter(sf)
if i < ndim - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# Generate distribution.
sy = smooth[j]
check_ix = isinstance(sx, int_type)
check_iy = isinstance(sy, int_type)
if check_ix and check_iy:
fill_contours = False
plot_contours = False
else:
fill_contours = True
plot_contours = True
hist2d_kwargs['fill_contours'] = hist2d_kwargs.get('fill_contours',
fill_contours)
hist2d_kwargs['plot_contours'] = hist2d_kwargs.get('plot_contours',
plot_contours)
_hist2d(y, x, ax=ax, span=[span[j], span[i]],
weights=weights, color=color, smooth=[sy, sx],
**hist2d_kwargs)
# Add truth values
if truths is not None:
if truths[j] is not None:
try:
[ax.axvline(t, color=truth_color, **truth_kwargs)
for t in truths[j]]
except:
ax.axvline(truths[j], color=truth_color,
**truth_kwargs)
if truths[i] is not None:
try:
[ax.axhline(t, color=truth_color, **truth_kwargs)
for t in truths[i]]
except:
ax.axhline(truths[i], color=truth_color,
**truth_kwargs)
return (fig, axes) | 0.000751 |
def _skew(self, x, z, d=0):
""" returns the kurtosis parameter for direction d, d=0 is rho, d=1 is z """
# get the top bound determined by the kurtosis
kval = (np.tanh(self._poly(z, self._kurtosis_coeffs(d)))+1)/12.
bdpoly = np.array([
-1.142468e+04, 3.0939485e+03, -2.0283568e+02,
-2.1047846e+01, 3.79808487e+00, 1.19679781e-02
])
top = np.polyval(bdpoly, kval)
# limit the skewval to be 0 -> top val
skew = self._poly(z, self._skew_coeffs(d))
skewval = top*(np.tanh(skew) + 1) - top
return skewval*(3*x - x**3) | 0.004847 |
def editPerson(self, person, nickname, edits):
"""
Change the name and contact information associated with the given
L{Person}.
@type person: L{Person}
@param person: The person which will be modified.
@type nickname: C{unicode}
@param nickname: The new value for L{Person.name}
@type edits: C{list}
@param edits: list of tuples of L{IContactType} providers and
corresponding L{ListChanges} objects or dictionaries of parameter
values.
"""
for existing in self.store.query(Person, Person.name == nickname):
if existing is person:
continue
raise ValueError(
"A person with the name %r exists already." % (nickname,))
oldname = person.name
person.name = nickname
self._callOnOrganizerPlugins('personNameChanged', person, oldname)
for contactType, submission in edits:
if contactType.allowMultipleContactItems:
for edit in submission.edit:
self.editContactItem(
contactType, edit.object, edit.values)
for create in submission.create:
create.setter(
self.createContactItem(
contactType, person, create.values))
for delete in submission.delete:
delete.deleteFromStore()
else:
(contactItem,) = contactType.getContactItems(person)
self.editContactItem(
contactType, contactItem, submission) | 0.001221 |
def isTagEqual(self, other):
'''
isTagEqual - Compare if a tag contains the same tag name and attributes as another tag,
i.e. if everything between < and > parts of this tag are the same.
Does NOT compare children, etc. Does NOT compare if these are the same exact tag in the html (use regular == operator for that)
So for example:
tag1 = document.getElementById('something')
tag2 = copy.copy(tag1)
tag1 == tag2 # This is False
tag1.isTagEqual(tag2) # This is True
@return bool - True if tags have the same name and attributes, otherwise False
'''
# if type(other) != type(self):
# return False
# NOTE: Instead of type check,
# just see if we can get the needed attributes in case subclassing
try:
if self.tagName != other.tagName:
return False
myAttributes = self._attributes
otherAttributes = other._attributes
attributeKeysSelf = list(myAttributes.keys())
attributeKeysOther = list(otherAttributes.keys())
except:
return False
# Check that we have all the same attribute names
if set(attributeKeysSelf) != set(attributeKeysOther):
return False
for key in attributeKeysSelf:
if myAttributes.get(key) != otherAttributes.get(key):
return False
return True | 0.004493 |
def cli(ctx, feature_id, organism="", sequence=""):
"""Set the feature to read through the first encountered stop codon
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.set_readthrough_stop_codon(feature_id, organism=organism, sequence=sequence) | 0.00639 |
def is_denied(self, role, method, resource):
"""Check wherther role is denied to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
"""
return (role, method, resource) in self._denied | 0.00639 |
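A hypothetical minimal context showing the membership check; the Acl class name and deny() method are assumptions for illustration only.
class Acl(object):
    def __init__(self):
        self._denied = set()

    def deny(self, role, method, resource):
        self._denied.add((role, method, resource))

    def is_denied(self, role, method, resource):
        return (role, method, resource) in self._denied

acl = Acl()
acl.deny('anonymous', 'POST', 'admin_panel')
print(acl.is_denied('anonymous', 'POST', 'admin_panel'))  # True
print(acl.is_denied('editor', 'GET', 'admin_panel'))      # False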
def save_segment(self, f, segment, checksum=None):
""" Save the next segment to the image file, return next checksum value if provided """
segment_data = self.maybe_patch_segment_data(f, segment.data)
f.write(struct.pack('<II', segment.addr, len(segment_data)))
f.write(segment_data)
if checksum is not None:
return ESPLoader.checksum(segment_data, checksum) | 0.007317 |
def operations():
"""
Class decorator stores all calls into list.
Can be used until .invalidate() is called.
:return: decorated class
"""
def decorator(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
self = args[0]
assert self.__can_use, "User operation queue only in 'with' block"
def defaults_dict():
f_args, varargs, keywords, defaults = inspect.getargspec(func)
defaults = defaults or []
return dict(zip(f_args[-len(defaults)+len(args[1:]):], defaults[len(args[1:]):]))
            route_args = dict(defaults_dict(), **kwargs)  # kwargs take precedence over signature defaults
func(*args, **kwargs)
self.operations.append((func.__name__, args[1:], route_args, ))
return wrapped_func
def decorate(clazz):
for attr in clazz.__dict__:
if callable(getattr(clazz, attr)):
setattr(clazz, attr, decorator(getattr(clazz, attr)))
def __init__(self): # simple parameter-less constructor
self.operations = []
self.__can_use = True
def invalidate(self):
self.__can_use = False
clazz.__init__ = __init__
clazz.invalidate = invalidate
return clazz
return decorate | 0.003823 |
def fetch_room_ids(self, names):
""" Fetches the ids of the rooms with the given names """
ret = dict()
names_set = frozenset(names)
for d in json.loads(self.open_url(
'timetables?type=location'))['timetable']:
name = d['hostKey']
if name in names_set:
ret[name] = d['value'].encode('utf-8')
return ret | 0.004878 |
def hist(x, bins=10, labels=None, aspect="auto", plot=True, ax=None, range=None):
"""
Creates a histogram of data *x* with a *bins*, *labels* = :code:`[title, xlabel, ylabel]`.
"""
h, edge = _np.histogram(x, bins=bins, range=range)
mids = edge + (edge[1]-edge[0])/2
mids = mids[:-1]
if plot:
if ax is None:
_plt.hist(x, bins=bins, range=range)
else:
ax.hist(x, bins=bins, range=range)
if labels is not None:
_addlabel(labels[0], labels[1], labels[2])
return h, mids | 0.007067 |
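A short usage sketch; it assumes numpy/matplotlib are importable and that the module-level aliases used by the snippet (_np, _plt, _addlabel) exist.
import numpy as np

data = np.random.normal(size=1000)
counts, mids = hist(data, bins=20, plot=False)  # skip plotting, just bin
print(counts.sum(), len(mids))  # 1000 20 -- every sample lands in a bin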
def jobs_insert_query(self, sql, table_name=None, append=False,
overwrite=False, dry_run=False, use_cache=True, batch=True,
allow_large_results=False, table_definitions=None, query_params=None):
"""Issues a request to insert a query job.
Args:
sql: the SQL string representing the query to execute.
table_name: None for an anonymous table, or a name parts tuple for a long-lived table.
append: if True, append to the table if it is non-empty; else the request will fail if table
is non-empty unless overwrite is True.
overwrite: if the table already exists, truncate it instead of appending or raising an
Exception.
dry_run: whether to actually execute the query or just dry run it.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified.
batch: whether to run this as a batch job (lower priority) or as an interactive job (high
priority, more expensive).
allow_large_results: whether to allow large results (slower with some restrictions but
can handle big jobs).
table_definitions: a dictionary of ExternalDataSource names and objects for any external
tables referenced in the query.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (self.project_id, ''))
data = {
'kind': 'bigquery#job',
'configuration': {
'query': {
'query': sql,
'useQueryCache': use_cache,
'allowLargeResults': allow_large_results,
'useLegacySql': False
},
'dryRun': dry_run,
'priority': 'BATCH' if batch else 'INTERACTIVE',
},
}
query_config = data['configuration']['query']
if table_definitions:
expanded_definitions = {}
for td in table_definitions:
expanded_definitions[td] = table_definitions[td]._to_query_json()
query_config['tableDefinitions'] = expanded_definitions
if table_name:
query_config['destinationTable'] = {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
}
if append:
query_config['writeDisposition'] = "WRITE_APPEND"
elif overwrite:
query_config['writeDisposition'] = "WRITE_TRUNCATE"
if self.bigquery_billing_tier:
query_config['maximumBillingTier'] = self.bigquery_billing_tier
if query_params:
query_config['queryParameters'] = query_params
return google.datalab.utils.Http.request(url, data=data, credentials=self.credentials) | 0.007234 |
def get_op_result_name(left, right):
"""
Find the appropriate name to pin to an operation result. This result
should always be either an Index or a Series.
Parameters
----------
left : {Series, Index}
right : object
Returns
-------
name : object
Usually a string
"""
# `left` is always a pd.Series when called from within ops
if isinstance(right, (ABCSeries, pd.Index)):
name = _maybe_match_name(left, right)
else:
name = left.name
return name | 0.001887 |
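An illustration with pandas Series, assuming pandas plus the module's ABCSeries/_maybe_match_name helpers are available:
import pandas as pd

a = pd.Series([1, 2], name='x')
b = pd.Series([3, 4], name='x')
c = pd.Series([5, 6], name='y')
print(get_op_result_name(a, b))   # 'x'  -- names agree, so it is kept
print(get_op_result_name(a, c))   # None -- conflicting names are dropped
print(get_op_result_name(a, 10))  # 'x'  -- scalar on the right, keep left name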
def beta(self):
"""
Fixed-effect sizes.
Returns
-------
effect-sizes : numpy.ndarray
Optimal fixed-effect sizes.
Notes
-----
Setting the derivative of log(p(𝐲)) over effect sizes equal
to zero leads to solutions 𝜷 from equation ::
(QᵀX)ᵀD⁻¹(QᵀX)𝜷 = (QᵀX)ᵀD⁻¹(Qᵀ𝐲).
"""
from numpy_sugar.linalg import rsolve
return rsolve(self._X["VT"], rsolve(self._X["tX"], self.mean())) | 0.00404 |
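A hedged numpy sketch of the normal equation quoted in the docstring, (QᵀX)ᵀD⁻¹(QᵀX)β = (QᵀX)ᵀD⁻¹(Qᵀy); the variable names below are illustrative and not the class's internal attribute layout.
import numpy as np

rng = np.random.default_rng(0)
QtX = rng.normal(size=(50, 3))       # rotated covariates, QᵀX
Qty = rng.normal(size=50)            # rotated outcome, Qᵀy
d = rng.uniform(0.5, 2.0, size=50)   # diagonal of D

A = QtX.T @ (QtX / d[:, None])       # (QᵀX)ᵀ D⁻¹ (QᵀX)
b = QtX.T @ (Qty / d)                # (QᵀX)ᵀ D⁻¹ (Qᵀy)
beta = np.linalg.solve(A, b)         # optimal fixed-effect sizes
print(beta)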
def count(self):
"""
Return the number of hits matching the query and filters. Note that
only the actual number is returned.
"""
if hasattr(self, '_response'):
return self._response.hits.total
es = connections.get_connection(self._using)
d = self.to_dict(count=True)
# TODO: failed shards detection
return es.count(
index=self._index,
body=d,
**self._params
)['count'] | 0.004016 |
def user_factory(self):
"""Retrieve the current user (or None) from the database."""
if this.user_id is None:
return None
return self.user_model.objects.get(pk=this.user_id) | 0.009569 |