def _get_assessment_taken(self, assessment_taken_id):
"""Helper method for getting an AssessmentTaken objects given an Id."""
if assessment_taken_id not in self._assessments_taken:
mgr = self._get_provider_manager('ASSESSMENT')
lookup_session = mgr.get_assessment_taken_lookup_session(proxy=self._proxy) # Should this be _for_bank?
lookup_session.use_federated_bank_view()
self._assessments_taken[assessment_taken_id] = (
lookup_session.get_assessment_taken(assessment_taken_id))
return self._assessments_taken[assessment_taken_id]
def get_next_first(intersection, intersections, to_end=True):
"""Gets the next node along the current (first) edge.
.. note::
This is a helper used only by :func:`get_next`, which in
turn is only used by :func:`basic_interior_combine`, which itself
is only used by :func:`combine_intersections`.
Along with :func:`get_next_second`, this function does the majority of the
heavy lifting in :func:`get_next`. **Very** similar to
:func:`get_next_second`, but this works with the first curve while the
other function works with the second.
Args:
intersection (.Intersection): The current intersection.
intersections (List[.Intersection]): List of all detected
intersections, provided as a reference for potential
points to arrive at.
to_end (Optional[bool]): Indicates if the next node should just be
the end of the first edge or :data:`None`.
Returns:
Optional[.Intersection]: The "next" point along a surface of
intersection. This will produce the next intersection along the
current (first) edge or the end of the same edge. If ``to_end`` is
:data:`False` and there are no other intersections along the current
edge, will return :data:`None` (rather than the end of the same edge).
"""
along_edge = None
index_first = intersection.index_first
s = intersection.s
for other_int in intersections:
other_s = other_int.s
if other_int.index_first == index_first and other_s > s:
if along_edge is None or other_s < along_edge.s:
along_edge = other_int
if along_edge is None:
if to_end:
# If there is no other intersection on the edge, just return
# the segment end.
return _intersection_helpers.Intersection(
index_first,
1.0,
None,
None,
interior_curve=CLASSIFICATION_T.FIRST,
)
else:
return None
else:
return along_edge
def psd(self):
"""
A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation.
"""
if not self._psd:
errMsg = "The PSD has not been set in the metricParameters "
errMsg += "instance."
raise ValueError(errMsg)
return self._psd
def save_ds9(output, filename):
"""Save ds9 region output info filename.
Parameters
----------
output : str
String containing the full output to be exported as a ds9 region
file.
filename : str
Output file name.
"""
ds9_file = open(filename, 'wt')
ds9_file.write(output)
ds9_file.close()
def to_dict(self):
""" Transform an attribute to a dict
"""
data = {}
# mandatory characteristics
data["name"] = self.name
data["description"] = self.description if self.description and len(self.description) else None
data["type"] = self.type if self.type and len(self.type) else None
data["allowed_chars"] = self.allowed_chars if self.allowed_chars and len(self.allowed_chars) else None
data["allowed_choices"] = self.allowed_choices
data["autogenerated"] = self.autogenerated
data["channel"] = self.channel if self.channel and len(self.channel) else None
data["creation_only"] = self.creation_only
data["default_order"] = self.default_order
data["default_value"] = self.default_value if self.default_value and len(self.default_value) else None
data["deprecated"] = self.deprecated
data["exposed"] = self.exposed
data["filterable"] = self.filterable
data["format"] = self.format if self.format and len(self.format) else None
data["max_length"] = int(self.max_length) if self.max_length is not None else None
data["max_value"] = int(self.max_value) if self.max_value is not None else None
data["min_length"] = int(self.min_length) if self.min_length is not None else None
data["min_value"] = int(self.min_value) if self.min_value is not None else None
data["orderable"] = self.orderable
data["read_only"] = self.read_only
data["required"] = self.required
data["transient"] = self.transient
data["unique"] = self.unique
data["uniqueScope"] = self.unique_scope if self.unique_scope and len(self.unique_scope) else None
data["subtype"] = self.subtype if self.subtype and len(self.subtype) else None
data["userlabel"] = self.userlabel if self.userlabel and len(self.userlabel) else None
return data
def process_blast(
blast_dir,
org_lengths,
fraglengths=None,
mode="ANIb",
identity=0.3,
coverage=0.7,
logger=None,
):
"""Returns a tuple of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis.
"""
# Process directory to identify input files
blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), mode)
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .blast_tab files assuming that the filename format holds:
# org1_vs_org2.blast_tab:
for blastfile in blastfiles:
qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_")
# We may have BLAST files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % blastfile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % blastfile
)
continue
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
query_cover = float(resultvals[0]) / org_lengths[qname]
# Populate dataframes: when assigning data, we need to note that
# we have asymmetrical data from BLAST output, so only the
# upper triangle is populated
results.add_tot_length(qname, sname, resultvals[0], sym=False)
results.add_sim_errors(qname, sname, resultvals[1], sym=False)
results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
results.add_coverage(qname, sname, query_cover)
return results
def get_users(self, fetch=True):
"""Return this Applications's users object, populating it if fetch
is True."""
return Users(self.resource.users, self.client, populate=fetch)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def read(self, ncfile, timegrid_data) -> None:
"""Read the data from the given NetCDF file.
The argument `timegrid_data` defines the data period of the
given NetCDF file.
See the general documentation on class |NetCDFVariableFlat|
for some examples.
"""
array = query_array(ncfile, self.name)
idxs: Tuple[Any] = (slice(None),)
subdev2index = self.query_subdevice2index(ncfile)
for devicename, seq in self.sequences.items():
if seq.NDIM:
if self._timeaxis:
subshape = (array.shape[1],) + seq.shape
else:
subshape = (array.shape[0],) + seq.shape
subarray = numpy.empty(subshape)
temp = devicename + '_'
for prod in self._product(seq.shape):
station = temp + '_'.join(str(idx) for idx in prod)
idx0 = subdev2index.get_index(station)
subarray[idxs+prod] = array[self.get_timeplaceslice(idx0)]
else:
idx = subdev2index.get_index(devicename)
subarray = array[self.get_timeplaceslice(idx)]
seq.series = seq.adjust_series(timegrid_data, subarray)
def resolve_links(self):
"""Attempt to resolve all internal links (locally).
In case the linked resources are found either as members of the array or within
the `includes` element, those will be replaced and reference the actual resources.
No network calls will be performed.
"""
for resource in self.items_mapped['Entry'].values():
for dct in [getattr(resource, '_cf_cda', {}), resource.fields]:
for k, v in dct.items():
if isinstance(v, ResourceLink):
resolved = self._resolve_resource_link(v)
if resolved is not None:
dct[k] = resolved
elif isinstance(v, (MultipleAssets, MultipleEntries, list)):
for idx, ele in enumerate(v):
if not isinstance(ele, ResourceLink):
break
resolved = self._resolve_resource_link(ele)
if resolved is not None:
v[idx] = resolved
def QA_indicator_WR(DataFrame, N, N1):
'Williams %R (WR) indicator'
HIGH = DataFrame['high']
LOW = DataFrame['low']
CLOSE = DataFrame['close']
WR1 = 100 * (HHV(HIGH, N) - CLOSE) / (HHV(HIGH, N) - LLV(LOW, N))
WR2 = 100 * (HHV(HIGH, N1) - CLOSE) / (HHV(HIGH, N1) - LLV(LOW, N1))
DICT = {'WR1': WR1, 'WR2': WR2}
return pd.DataFrame(DICT)
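# Illustrative sketch (not from the source): the same Williams %R computation with
# plain pandas, assuming HHV/LLV behave like rolling max/min over the last N bars.
import pandas as pd
def wr_sketch(df, n=14, n1=28):
    hhv_n, llv_n = df['high'].rolling(n).max(), df['low'].rolling(n).min()
    hhv_n1, llv_n1 = df['high'].rolling(n1).max(), df['low'].rolling(n1).min()
    wr1 = 100 * (hhv_n - df['close']) / (hhv_n - llv_n)
    wr2 = 100 * (hhv_n1 - df['close']) / (hhv_n1 - llv_n1)
    return pd.DataFrame({'WR1': wr1, 'WR2': wr2})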
def _import_all_modules():
"""dynamically imports all modules in the package"""
import traceback
import os
global results
globals_, locals_ = globals(), locals()
def load_module(modulename, package_module):
try:
names = []
module = __import__(package_module, globals_, locals_, [modulename])
for name in module.__dict__:
if not name.startswith('_'):
globals_[name] = module.__dict__[name]
names.append(name)
except Exception:
traceback.print_exc()
raise
return module, names
def load_dir(abs_dirpath, rel_dirpath=''):
results = []
# dynamically import all the package modules
for filename in os.listdir(abs_dirpath):
rel_filepath = os.path.join(rel_dirpath, filename)
abs_filepath = os.path.join(abs_dirpath, filename)
if filename[0] != '_' and os.path.isfile(abs_filepath) and filename.split('.')[-1] in ('py', 'pyw'):
modulename = '.'.join(os.path.normpath(os.path.splitext(rel_filepath)[0]).split(os.sep))
package_module = '.'.join([__name__, modulename])
module, names = load_module(modulename, package_module)
results += names
elif os.path.isdir(abs_filepath):
results += load_dir(abs_filepath, rel_filepath)
return results
return load_dir(os.path.dirname(__file__))
def add_console_logger(logger, level='info'):
"""
Add a console handler as a log output.
"""
logger.setLevel(getattr(logging, level.upper()))
if not logger.handlers:
# Set up color if we are in a tty and curses is installed
color = False
if curses and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except:
pass
console = logging.StreamHandler()
console.setFormatter(_LogFormatter(color=color))
logger.addHandler(console)
return logger
def remove(self):
""" Remove the directory. """
lib.gp_camera_folder_remove_dir(
self._cam._cam, self.parent.path.encode(), self.name.encode(),
self._cam._ctx)
def transform(self, fn, dtype=None, *args, **kwargs):
"""Equivalent to map, compatibility purpose only.
Column parameter ignored.
"""
rdd = self._rdd.map(fn)
if dtype is None:
return self.__class__(rdd, noblock=True, **self.get_params())
if dtype is np.ndarray:
return ArrayRDD(rdd, bsize=self.bsize, noblock=True)
elif dtype is sp.spmatrix:
return SparseRDD(rdd, bsize=self.bsize, noblock=True)
else:
return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True)
def get_all_sources(self):
"""
Returns:
OrderedDict: all source file names in the hierarchy, paired with
the names of their subpages.
"""
if self.__all_sources is None:
self.__all_sources = OrderedDict()
self.walk(self.__add_one)
return self.__all_sources
def parse_baxter(reading):
"""
Parse a Baxter string and render it with all its contents, namely
initial, medial, final, and tone.
"""
initial = ''
medial = ''
final = ''
tone = ''
# determine environments
inienv = True
medienv = False
finenv = False
tonenv = False
inichars = "pbmrtdnkgnsyhzl'x"
chars = list(reading)
for char in chars:
# switch environments
if char in 'jw' and not finenv:
inienv,medienv,finenv,tonenv = False,True,False,False
elif char not in inichars or finenv:
if char in 'XH':
inienv,medienv,finenv,tonenv = False,False,False,True
else:
inienv,medienv,finenv,tonenv = False,False,True,False
# fill in slots
if inienv:
initial += char
if medienv:
medial += char
if finenv:
final += char
if tonenv:
tone += char
# post-parse tone
if not tone and final[-1] in 'ptk':
tone = 'R'
elif not tone:
tone = 'P'
# post-parse medial
if 'j' not in medial and 'y' in initial:
medial += 'j'
# post-parse labial
if final[0] in 'u' and 'w' not in medial:
medial = 'w' + medial
return initial,medial,final,tone
def _app_cache_deepcopy(obj):
"""
A helper that correctly deep-copies the model cache state
"""
if isinstance(obj, defaultdict):
return deepcopy(obj)
elif isinstance(obj, dict):
return type(obj)((_app_cache_deepcopy(key), _app_cache_deepcopy(val)) for key, val in obj.items())
elif isinstance(obj, list):
return list(_app_cache_deepcopy(val) for val in obj)
elif isinstance(obj, AppConfig):
app_conf = Empty()
app_conf.__class__ = AppConfig
app_conf.__dict__ = _app_cache_deepcopy(obj.__dict__)
return app_conf
return obj
def fence_point_encode(self, target_system, target_component, idx, count, lat, lng):
'''
A fence point. Used to set a point when from GCS -> MAV. Also used to
return a point from MAV -> GCS
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
idx : point index (first point is 1, 0 is for return point) (uint8_t)
count : total number of points (for sanity checking) (uint8_t)
lat : Latitude of point (float)
lng : Longitude of point (float)
'''
return MAVLink_fence_point_message(target_system, target_component, idx, count, lat, lng)
def Var(self, mu=None):
"""Computes the variance of a PMF.
Args:
mu: the point around which the variance is computed;
if omitted, computes the mean
Returns:
float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.iteritems():
var += p * (x - mu) ** 2
return var
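# A small self-contained check of the same formula, Var(X) = sum_x p(x) * (x - mu)**2,
# using a hypothetical pmf (values are illustrative, not from the source).
pmf = {1: 0.2, 2: 0.5, 3: 0.3}
mu = sum(p * x for x, p in pmf.items())               # 2.1
var = sum(p * (x - mu) ** 2 for x, p in pmf.items())
print(round(var, 3))                                  # 0.49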
def model(self, value):
"""The model property.
Args:
value (string): the property value.
"""
if value == self._defaults['ai.device.model'] and 'ai.device.model' in self._values:
del self._values['ai.device.model']
else:
self._values['ai.device.model'] = value
def migrate(connection, dsn):
""" Collects all migrations and applies missed.
Args:
connection (sqlalchemy connection):
"""
all_migrations = _get_all_migrations()
logger.debug('Collected migrations: {}'.format(all_migrations))
for version, modname in all_migrations:
if _is_missed(connection, version) and version <= SCHEMA_VERSION:
logger.info('Missed migration: {} migration is missed. Migrating...'.format(version))
module = __import__(modname, fromlist='dummy')
# run each migration under its own transaction. This allows us to apply valid migrations
# and break on invalid.
trans = connection.begin()
try:
module.Migration().migrate(connection)
_update_version(connection, version)
trans.commit()
except:
trans.rollback()
logger.error("Failed to migrate '{}' on {} ".format(version, dsn))
raise
def _calculate_status(self, target_freshness, freshness):
"""Calculate the status of a run.
:param dict target_freshness: The target freshness dictionary. It must
match the freshness spec.
:param timedelta freshness: The actual freshness of the data, as
calculated from the database's timestamps
"""
# if freshness > warn_after > error_after, you'll get an error, not a
# warning
for key in ('error', 'warn'):
fullkey = '{}_after'.format(key)
if fullkey not in target_freshness:
continue
target = target_freshness[fullkey]
kwname = target['period'] + 's'
kwargs = {kwname: target['count']}
if freshness > timedelta(**kwargs).total_seconds():
return key
return 'pass'
def from_content(cls, content):
"""Creates an instance of the class from the html content of a highscores page.
Notes
-----
Tibia.com only shows up to 25 entries per page, so in order to obtain the full highscores, all 12 pages must
be parsed and merged into one.
Parameters
----------
content: :class:`str`
The HTML content of the page.
Returns
-------
:class:`Highscores`
The highscores results contained in the page.
Raises
------
InvalidContent
If content is not the HTML of a highscore's page."""
parsed_content = parse_tibiacom_content(content)
tables = cls._parse_tables(parsed_content)
filters = tables.get("Highscores Filter")
if filters is None:
raise InvalidContent("content does is not from the highscores section of Tibia.com")
world_filter, vocation_filter, category_filter = filters
world = world_filter.find("option", {"selected": True})["value"]
if world == "":
return None
category = category_filter.find("option", {"selected": True})["value"]
vocation_selected = vocation_filter.find("option", {"selected": True})
vocation = int(vocation_selected["value"]) if vocation_selected else 0
highscores = cls(world, category, vocation=vocation)
entries = tables.get("Highscores")
if entries is None:
return None
_, header, *rows = entries
info_row = rows.pop()
highscores.results_count = int(results_pattern.search(info_row.text).group(1))
for row in rows:
cols_raw = row.find_all('td')
highscores._parse_entry(cols_raw)
return highscores
def carrysave_adder(a, b, c, final_adder=ripple_add):
"""
Adds three wirevectors up in an efficient manner
:param WireVector a, b, c : the three wires to add up
:param function final_adder : The adder to use to do the final addition
:return: a wirevector with length 2 longer than the largest input
"""
a, b, c = libutils.match_bitwidth(a, b, c)
partial_sum = a ^ b ^ c
shift_carry = (a | b) & (a | c) & (b | c)
return pyrtl.concat(final_adder(partial_sum[1:], shift_carry), partial_sum[0])
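# Sanity check of the carry-save identity the adder relies on (illustration only, using
# plain integers rather than PyRTL wires): a + b + c == (a ^ b ^ c) + 2 * majority(a, b, c).
for a, b, c in [(5, 9, 14), (0, 0, 0), (255, 1, 127)]:
    partial = a ^ b ^ c
    carry = (a | b) & (a | c) & (b | c)   # bitwise majority, same expression as above
    assert a + b + c == partial + 2 * carry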
def __cache(self, file, content, document):
"""
Caches given file.
:param file: File to cache.
:type file: unicode
:param content: File content.
:type content: list
:param document: File document.
:type document: QTextDocument
"""
self.__files_cache.add_content(**{file: CacheData(content=content, document=document)})
def valuefrompostdata(self, postdata):
"""This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set()"""
if self.multi: #multi parameters can be passed as parameterid=choiceid1,choiceid2 or by setting parameterid[choiceid]=1 (or whatever other non-zero value)
found = False
if self.id in postdata:
found = True
passedvalues = postdata[self.id].split(',')
values = []
for choicekey in [x[0] for x in self.choices]:
if choicekey in passedvalues:
found = True
values.append(choicekey)
else:
values = []
for choicekey in [x[0] for x in self.choices]:
if self.id+'['+choicekey+']' in postdata:
found = True
if postdata[self.id+'['+choicekey+']']:
values.append(choicekey)
if not found:
return None
else:
return values
else:
if self.id in postdata:
return postdata[self.id]
else:
return None
def setup(level='debug', output=None):
''' Hivy formatted logger '''
output = output or settings.LOG['file']
level = level.upper()
handlers = [
logbook.NullHandler()
]
if output == 'stdout':
handlers.append(
logbook.StreamHandler(sys.stdout,
format_string=settings.LOG['format'],
level=level))
else:
handlers.append(
logbook.FileHandler(output,
format_string=settings.LOG['format'],
level=level))
sentry_dns = settings.LOG['sentry_dns']
if sentry_dns:
handlers.append(SentryHandler(sentry_dns, level='ERROR'))
return logbook.NestedSetup(handlers)
def gen_token(cls):
""" 生成 access_token """
token = os.urandom(16)
token_time = int(time.time())
return {'token': token, 'token_time': token_time}
def format_result(result):
"""Serialise Result"""
instance = None
error = None
if result["instance"] is not None:
instance = format_instance(result["instance"])
if result["error"] is not None:
error = format_error(result["error"])
result = {
"success": result["success"],
"plugin": format_plugin(result["plugin"]),
"instance": instance,
"error": error,
"records": format_records(result["records"]),
"duration": result["duration"]
}
if os.getenv("PYBLISH_SAFE"):
schema.validate(result, "result")
return result
def __audioread_load(path, offset, duration, dtype):
'''Load an audio buffer using audioread.
This loads one block at a time, and then concatenates the results.
'''
y = []
with audioread.audio_open(path) as input_file:
sr_native = input_file.samplerate
n_channels = input_file.channels
s_start = int(np.round(sr_native * offset)) * n_channels
if duration is None:
s_end = np.inf
else:
s_end = s_start + (int(np.round(sr_native * duration))
* n_channels)
n = 0
for frame in input_file:
frame = util.buf_to_float(frame, dtype=dtype)
n_prev = n
n = n + len(frame)
if n < s_start:
# offset is after the current frame
# keep reading
continue
if s_end < n_prev:
# we're off the end. stop reading
break
if s_end < n:
# the end is in this frame. crop.
frame = frame[:s_end - n_prev]
if n_prev <= s_start <= n:
# beginning is in this frame
frame = frame[(s_start - n_prev):]
# tack on the current frame
y.append(frame)
if y:
y = np.concatenate(y)
if n_channels > 1:
y = y.reshape((-1, n_channels)).T
else:
y = np.empty(0, dtype=dtype)
return y, sr_native
def find(cls, key=None, **kwargs):
"""
Find an asset by key
E.g.
shopify.Asset.find('layout/theme.liquid', theme_id=99)
"""
if not key:
return super(Asset, cls).find(**kwargs)
params = {"asset[key]": key}
params.update(kwargs)
theme_id = params.get("theme_id")
path_prefix = "%s/themes/%s" % (cls.site, theme_id) if theme_id else cls.site
resource = cls.find_one("%s/assets.%s" % (path_prefix, cls.format.extension), **params)
if theme_id and resource:
resource._prefix_options["theme_id"] = theme_id
return resource
def ip():
"""Show ip address."""
ok, err = _hack_ip()
if not ok:
click.secho(click.style(err, fg='red'))
sys.exit(1)
click.secho(click.style(err, fg='green'))
def _mapped_std_streams(lookup_paths, streams=('stdin', 'stdout', 'stderr')):
"""Get a mapping of standard streams to given paths."""
# FIXME add device number too
standard_inos = {}
for stream in streams:
try:
stream_stat = os.fstat(getattr(sys, stream).fileno())
key = stream_stat.st_dev, stream_stat.st_ino
standard_inos[key] = stream
except Exception: # FIXME UnsupportedOperation
pass
# FIXME if not getattr(sys, stream).istty()
def stream_inos(paths):
"""Yield tuples with stats and path."""
for path in paths:
try:
stat = os.stat(path)
key = (stat.st_dev, stat.st_ino)
if key in standard_inos:
yield standard_inos[key], path
except FileNotFoundError: # pragma: no cover
pass
return dict(stream_inos(lookup_paths)) if standard_inos else {}
def sys_version(version_tuple):
"""
Set a temporary sys.version_info tuple
:param version_tuple: a fake sys.version_info tuple
"""
old_version = sys.version_info
sys.version_info = version_tuple
yield
sys.version_info = old_version
def get_content_commit_date(extensions, acceptance_callback=None,
root_dir='.'):
"""Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of `str`
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
for a Sphinx project. **Extension comparison is case sensitive.** Add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
root_dir : `str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found.
"""
logger = logging.getLogger(__name__)
def _null_callback(_):
return True
if acceptance_callback is None:
acceptance_callback = _null_callback
# Cache the repo object for each query
root_dir = os.path.abspath(root_dir)
repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)
# Iterate over all files with all file extensions, looking for the
# newest commit datetime.
newest_datetime = None
iters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)
for ext in extensions]
for content_path in itertools.chain(*iters):
content_path = os.path.abspath(os.path.join(root_dir, content_path))
if acceptance_callback(content_path):
logger.debug('Found content path %r', content_path)
try:
commit_datetime = read_git_commit_timestamp_for_file(
content_path, repo=repo)
logger.debug('Commit timestamp of %r is %s',
content_path, commit_datetime)
except IOError:
logger.warning(
'Could not get commit for %r, skipping',
content_path)
continue
if not newest_datetime or commit_datetime > newest_datetime:
# Seed initial newest_datetime
# or set a newer newest_datetime
newest_datetime = commit_datetime
logger.debug('Newest commit timestamp is %s', newest_datetime)
logger.debug('Final commit timestamp is %s', newest_datetime)
if newest_datetime is None:
raise RuntimeError('No content files found in {}'.format(root_dir))
return newest_datetime
def syllabify(word):
'''Syllabify the given word, whether simplex or complex.'''
compound = bool(re.search(r'(-| |=)', word))
syllabify = _syllabify_compound if compound else _syllabify_simplex
syllabifications = list(syllabify(word))
for word, rules in rank(syllabifications):
# post-process
word = str(replace_umlauts(word, put_back=True))
rules = rules[1:]
yield word, rules
def list_vnets(access_token, subscription_id):
'''List the VNETs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/',
'/virtualNetworks?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
def get_hex_color(layer_type):
"""
Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block.
"""
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed]
def _get_fuzzy_tc_matches(text, full_text, options):
'''
Get the options that match the full text, then from each option
return only the individual words which have not yet been matched
and which also match the text being tab-completed.
'''
print("text: {}, full: {}, options: {}".format(text, full_text, options))
# get the options which match the full text
matching_options = _get_fuzzy_matches(full_text, options)
# need to only return the individual words which:
# - match the 'text'
# - are not exclusively matched by other input in full_text
# - when matched, still allows all other input in full_text to be matched
# get the input tokens
input_tokens = full_text.split()
# remove one instance of the text to be matched
initial_tokens = input_tokens.remove(text)
# track the final matches:
final_matches = []
# find matches per option
for option in options:
option_tokens = option.split()
# get tokens which match the text
matches = [t for t in option_tokens if text in t]
# get input tokens which match one of the matches
input_tokens_which_match = [t for t in input_tokens for m in matches if t in m]
# if any input token ONLY matches a match, remove that match
for token in input_tokens_which_match:
token_matches = [t for t in option_tokens if token in t]
if len(token_matches) == 1:
match = token_matches[0]
if match in matches:
matches.remove(match)
# for the remaining matches, if the input tokens can be fuzzily matched without
# the match, it's ok to return it.
for match in matches:
# copy option tokens
option_tokens_minus_match = option_tokens[:]
# remove the match
option_tokens_minus_match.remove(match)
option_minus_match = ' '.join(option_tokens_minus_match)
if _get_fuzzy_matches(' '.join(input_tokens), [option_minus_match]):
if match not in final_matches:
final_matches.append(match)
return final_matches
def remove(self):
""" remove the environment """
try:
self.phase = PHASE.REMOVE
self.logger.info("Removing environment %s..." % self.namespace)
self.instantiate_features()
self._specialize()
for feature in self.features.run_order:
try:
self.run_action(feature, 'sync')
except FormulaException:
# continue trying to remove any remaining features.
pass
self.clear_all()
self.directory.remove()
self.injections.commit()
if self.error_occured:
self.logger.error(warning_template)
self.logger.error(REMOVE_WARNING)
except Exception:
self.logger.debug("", exc_info=sys.exc_info())
et, ei, tb = sys.exc_info()
reraise(et, ei, tb)
def tai(self, year=None, month=1, day=1, hour=0, minute=0, second=0.0,
jd=None):
"""Build a `Time` from a TAI calendar date.
Supply the International Atomic Time (TAI) as a proleptic
Gregorian calendar date:
>>> t = ts.tai(2014, 1, 18, 1, 35, 37.5)
>>> t.tai
2456675.56640625
>>> t.tai_calendar()
(2014, 1, 18, 1, 35, 37.5)
"""
if jd is not None:
tai = jd
else:
tai = julian_date(
_to_array(year), _to_array(month), _to_array(day),
_to_array(hour), _to_array(minute), _to_array(second),
)
return self.tai_jd(tai)
def read_composite_array(fname, sep=','):
r"""
Convert a CSV file with header into an ArrayWrapper object.
>>> from openquake.baselib.general import gettemp
>>> fname = gettemp('PGA:3,PGV:2,avg:1\n'
... '.1 .2 .3,.4 .5,.6\n')
>>> print(read_composite_array(fname).array) # array of shape (1,)
[([0.1, 0.2, 0.3], [0.4, 0.5], [0.6])]
"""
with open(fname) as f:
header = next(f)
if header.startswith('#'): # the first line is a comment, skip it
attrs = dict(parse_comment(header[1:]))
header = next(f)
else:
attrs = {}
transheader = htranslator.read(header.split(sep))
fields, dtype = parse_header(transheader)
ts_pairs = [] # [(type, shape), ...]
for name in fields:
dt = dtype.fields[name][0]
ts_pairs.append((dt.subdtype[0].type if dt.subdtype else dt.type,
dt.shape))
col_ids = list(range(1, len(ts_pairs) + 1))
num_columns = len(col_ids)
records = []
col, col_id = '', 0
for i, line in enumerate(f, 2):
row = line.split(sep)
if len(row) != num_columns:
raise InvalidFile(
'expected %d columns, found %d in file %s, line %d' %
(num_columns, len(row), fname, i))
try:
record = []
for (ntype, shape), col, col_id in zip(ts_pairs, row, col_ids):
record.append(_cast(col, ntype, shape, i, fname))
records.append(tuple(record))
except Exception as e:
raise InvalidFile(
'Could not cast %r in file %s, line %d, column %d '
'using %s: %s' % (col, fname, i, col_id,
(ntype.__name__,) + shape, e))
return ArrayWrapper(numpy.array(records, dtype), attrs)
def add_btn_ok(self,label_ok):
"""
Adds an OK button to allow the user to exit the dialog.
This widget can be triggered by setting the label ``label_ok`` to a string.
This widget will be mostly centered on the screen, but below the main label
by the double of its height.
"""
# OK Button
self.wbtn_ok = button.Button("btn_ok",self,self.window,self.peng,
pos=lambda sw,sh, bw,bh: (sw/2-bw/2,sh/2-bh/2-bh*2),
size=[0,0],
label=label_ok,
borderstyle=self.borderstyle
)
self.wbtn_ok.size = lambda sw,sh: (self.wbtn_ok._label.font_size*8,self.wbtn_ok._label.font_size*2)
self.addWidget(self.wbtn_ok)
def f():
self.doAction("click_ok")
self.exitDialog()
self.wbtn_ok.addAction("click",f) | Adds an OK button to allow the user to exit the dialog.
This widget can be triggered by setting the label ``label_ok`` to a string.
This widget will be mostly centered on the screen, but below the main label
by the double of its height. |
def convert_csv_with_dialog_paths(csv_file):
"""
Converts CSV file with comma separated paths to filesystem paths.
:param csv_file:
:return:
"""
def convert_line_to_path(line):
file, dir = map(lambda x: x.strip(), line.split(","))
return os.path.join(dir, file)
return map(convert_line_to_path, csv_file)
def _parse_scram_response(response):
"""Split a scram response into key, value pairs."""
return dict(item.split(b"=", 1) for item in response.split(b",")) | Split a scram response into key, value pairs. |
def _format_msg(text, width, indent=0, prefix=""):
r"""
Format exception message.
Replace newline characters \n with ``\n``, ` with \` and then wrap text as
needed
"""
text = repr(text).replace("`", "\\`").replace("\\n", " ``\\n`` ")
sindent = " " * indent if not prefix else prefix
wrapped_text = textwrap.wrap(text, width, subsequent_indent=sindent)
# [1:-1] eliminates quotes generated by repr in first line
return ("\n".join(wrapped_text))[1:-1].rstrip() | r"""
Format exception message.
Replace newline characters \n with ``\n``, ` with \` and then wrap text as
needed |
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def from_args(cls, args, project_profile_name=None):
"""Given the raw profiles as read from disk and the name of the desired
profile if specified, return the profile component of the runtime
config.
:param args argparse.Namespace: The arguments as parsed from the cli.
:param project_profile_name Optional[str]: The profile name, if
specified in a project.
:raises DbtProjectError: If there is no profile name specified in the
project or the command line arguments, or if the specified profile
is not found
:raises DbtProfileError: If the profile is invalid or missing, or the
target could not be found.
:returns Profile: The new Profile object.
"""
cli_vars = parse_cli_vars(getattr(args, 'vars', '{}'))
threads_override = getattr(args, 'threads', None)
target_override = getattr(args, 'target', None)
raw_profiles = read_profile(args.profiles_dir)
profile_name = cls.pick_profile_name(args.profile,
project_profile_name)
return cls.from_raw_profiles(
raw_profiles=raw_profiles,
profile_name=profile_name,
cli_vars=cli_vars,
target_override=target_override,
threads_override=threads_override
)
def start(self):
"""Start the component's event loop (thread-safe).
After the event loop is started the Qt thread calls the
component's :py:meth:`~Component.start_event` method, then calls
its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the component's
:py:meth:`~Component.stop_event` method is called before the
event loop terminates.
"""
if self._running:
raise RuntimeError('Component {} is already running'.format(
self._owner.__class__.__name__))
self._running = True
self.queue_command(self._owner.start_event)
# process any events that arrived before we started
while self._incoming:
self.queue_command(self._incoming.popleft())
def format_command(
command_args, # type: List[str]
command_output, # type: str
):
# type: (...) -> str
"""
Format command information for logging.
"""
text = 'Command arguments: {}\n'.format(command_args)
if not command_output:
text += 'Command output: None'
elif logger.getEffectiveLevel() > logging.DEBUG:
text += 'Command output: [use --verbose to show]'
else:
if not command_output.endswith('\n'):
command_output += '\n'
text += (
'Command output:\n{}'
'-----------------------------------------'
).format(command_output)
return text
def subset(args):
"""
%prog subset blastfile qbedfile sbedfile
Extract blast hits between given query and subject chrs.
If --qchrs or --schrs is not given, then all chrs from q/s genome will
be included. However one of --qchrs and --schrs must be specified.
Otherwise the script will do nothing.
"""
p = OptionParser(subset.__doc__)
p.add_option("--qchrs", default=None,
help="query chrs to extract, comma sep [default: %default]")
p.add_option("--schrs", default=None,
help="subject chrs to extract, comma sep [default: %default]")
p.add_option("--convert", default=False, action="store_true",
help="convert accns to chr_rank [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
blastfile, qbedfile, sbedfile = args
qchrs = opts.qchrs
schrs = opts.schrs
assert qchrs or schrs, p.print_help()
convert = opts.convert
outfile = blastfile + "."
if qchrs:
outfile += qchrs + "."
qchrs = set(qchrs.split(","))
else:
qchrs = set(Bed(qbedfile).seqids)
if schrs:
schrs = set(schrs.split(","))
if qbedfile != sbedfile or qchrs != schrs:
outfile += ",".join(schrs) + "."
else:
schrs = set(Bed(sbedfile).seqids)
outfile += "blast"
qo = Bed(qbedfile).order
so = Bed(sbedfile).order
fw = must_open(outfile, "w")
for b in Blast(blastfile):
q, s = b.query, b.subject
if qo[q][1].seqid in qchrs and so[s][1].seqid in schrs:
if convert:
b.query = qo[q][1].seqid + "_" + "{0:05d}".format(qo[q][0])
b.subject = so[s][1].seqid + "_" + "{0:05d}".format(so[s][0])
print(b, file=fw)
fw.close()
logging.debug("Subset blastfile written to `{0}`".format(outfile)) | %prog subset blastfile qbedfile sbedfile
Extract blast hits between given query and subject chrs.
If --qchrs or --schrs is not given, then all chrs from q/s genome will
be included. However one of --qchrs and --schrs must be specified.
Otherwise the script will do nothing. |
def write(self, chunk, offset):
"""
Write chunk to file at specified position
Return 0 if OK, else -1
"""
return lib.zfile_write(self._as_parameter_, chunk, offset)
def pywt_wavelet(wavelet):
"""Convert ``wavelet`` to a `pywt.Wavelet` instance."""
if isinstance(wavelet, pywt.Wavelet):
return wavelet
else:
return pywt.Wavelet(wavelet)
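# Hedged usage sketch, assuming PyWavelets is installed: strings are converted,
# existing Wavelet instances pass through unchanged.
import pywt
w = pywt_wavelet('db2')
assert isinstance(w, pywt.Wavelet)
assert pywt_wavelet(w) is w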
def get_stream_url(self, session_id, stream_id=None):
""" this method returns the url to get streams information """
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
if stream_id:
url = url + '/' + stream_id
return url
def insert(self, index, p_object):
"""
Insert an element into the list
"""
validated_value = self.get_validated_object(p_object)
if validated_value is not None:
self.__modified_data__.insert(index, validated_value)
def _clone(self, cid):
"""
Create a temporary image snapshot from a given cid.
Temporary image snapshots are marked with a sentinel label
so that they can be cleaned on unmount.
"""
try:
iid = self.client.commit(
container=cid,
conf={
'Labels': {
'io.projectatomic.Temporary': 'true'
}
}
)['Id']
except docker.errors.APIError as ex:
raise MountError(str(ex))
self.tmp_image = iid
return self._create_temp_container(iid)
def wrap_rtx(packet, payload_type, sequence_number, ssrc):
"""
Create a retransmission packet from a lost packet.
"""
rtx = RtpPacket(
payload_type=payload_type,
marker=packet.marker,
sequence_number=sequence_number,
timestamp=packet.timestamp,
ssrc=ssrc,
payload=pack('!H', packet.sequence_number) + packet.payload)
rtx.csrc = packet.csrc
rtx.extensions = packet.extensions
return rtx
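# Receiver-side sketch (independent of the RtpPacket class): the lost packet's
# sequence number is the first two bytes of the RTX payload, in network byte order.
from struct import pack, unpack
original_seq = 4242                                    # hypothetical value
rtx_payload = pack('!H', original_seq) + b'media bytes'
assert unpack('!H', rtx_payload[:2])[0] == original_seq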
def change_type(self, bucket, key, storage_type):
"""修改文件的存储类型
修改文件的存储类型为普通存储或者是低频存储,参考文档:
https://developer.qiniu.com/kodo/api/3710/modify-the-file-type
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
storage_type: 待操作资源存储类型,0为普通存储,1为低频存储
"""
resource = entry(bucket, key)
return self.__rs_do('chtype', resource, 'type/{0}'.format(storage_type))
def receive_external(self, http_verb, host, url, http_headers):
"""
Retrieve a streaming request for a file.
:param http_verb: str GET is only supported right now
:param host: str host we are requesting the file from
:param url: str url to ask the host for
:param http_headers: object headers to send with the request
:return: requests.Response containing the successful result
"""
if http_verb == 'GET':
return self.http.get(host + url, headers=http_headers, stream=True)
else:
raise ValueError("Unsupported http_verb:" + http_verb) | Retrieve a streaming request for a file.
:param http_verb: str GET is only supported right now
:param host: str host we are requesting the file from
:param url: str url to ask the host for
:param http_headers: object headers to send with the request
:return: requests.Response containing the successful result |
def cheat(num):
"""View the answer to a problem."""
# Define solution before echoing in case solution does not exist
solution = click.style(Problem(num).solution, bold=True)
click.confirm("View answer to problem %i?" % num, abort=True)
click.echo("The answer to problem {} is {}.".format(num, solution)) | View the answer to a problem. |
def write_tsv(self, path):
"""Write the database to a tab-delimited text file.
Parameters
----------
path: str
The path name of the file.
Returns
-------
None
"""
with open(path, 'wb') as ofh:
writer = csv.writer(
ofh, dialect='excel-tab',
quoting=csv.QUOTE_NONE, lineterminator=os.linesep
)
for gs in self._gene_sets.values():
writer.writerow(gs.to_list())
def external_commands(self):
"""Get the external commands from the daemon
Use a lock for this function to protect
:return: serialized external command list
:rtype: str
"""
res = []
with self.app.external_commands_lock:
for cmd in self.app.get_external_commands():
res.append(cmd.serialize())
return res
def linspace2(a, b, n, dtype=None):
"""similar to numpy.linspace but excluding the boundaries
this is the normal numpy.linspace:
>>> print linspace(0,1,5)
[ 0. 0.25 0.5 0.75 1. ]
and this excludes the boundaries:
>>> print linspace2(0,1,5)
[ 0.1 0.3 0.5 0.7 0.9]
"""
a = linspace(a, b, n + 1, dtype=dtype)[:-1]
if len(a) > 1:
diff01 = ((a[1] - a[0]) / 2).astype(a.dtype)
a += diff01
return a
def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value into a new Element object and return it."""
if self._nested is None:
state.raise_error(InvalidRootProcessor,
'Cannot directly serialize a non-nested array "{}"'
.format(self.alias))
if not value and self.required:
state.raise_error(MissingValue, 'Missing required array: "{}"'.format(
self.alias))
start_element, end_element = _element_path_create_new(self._nested)
self._serialize(end_element, value, state)
return start_element
def clean_axis(axis):
"""Remove ticks, tick labels, and frame from axis"""
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
for spine in list(axis.spines.values()):
spine.set_visible(False)
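# Typical usage on a Matplotlib Axes (sketch; the output file name is hypothetical):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.imshow([[0, 1], [1, 0]])
clean_axis(ax)                 # strip ticks, tick labels and the surrounding frame
fig.savefig('heatmap.png')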
def load_from_stream(self, stream, container, **opts):
"""
:param stream: XML file or file-like object
:param container: callable to make a container object
:param opts: optional keyword parameters to be sanitized
:return: Dict-like object holding config parameters
"""
root = ET.parse(stream).getroot()
path = anyconfig.utils.get_path_from_stream(stream)
nspaces = _namespaces_from_file(path)
return root_to_container(root, container=container,
nspaces=nspaces, **opts)
def square_distance(a: Square, b: Square) -> int:
"""
Gets the distance (i.e., the number of king steps) from square *a* to *b*.
"""
return max(abs(square_file(a) - square_file(b)), abs(square_rank(a) - square_rank(b)))
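# Equivalent sketch assuming the usual 0-63 square numbering, where file = square % 8 and
# rank = square // 8 (the square_file/square_rank helpers are not shown in this corpus).
def square_distance_sketch(a: int, b: int) -> int:
    return max(abs(a % 8 - b % 8), abs(a // 8 - b // 8))
assert square_distance_sketch(0, 63) == 7   # a1 -> h8 is seven king steps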
def script_current_send(self, seq, force_mavlink1=False):
'''
This message informs about the currently active SCRIPT.
seq : Active Sequence (uint16_t)
'''
return self.send(self.script_current_encode(seq), force_mavlink1=force_mavlink1)
def chat_update_message(self, channel, text, timestamp, **params):
"""chat.update
This method updates a message.
Required parameters:
`channel`: Channel containing the message to be updated. (e.g: "C1234567890")
`text`: New text for the message, using the default formatting rules. (e.g: "Hello world")
`timestamp`: Timestamp of the message to be updated (e.g: "1405894322.002768")
https://api.slack.com/methods/chat.update
"""
method = 'chat.update'
if self._channel_is_name(channel):
# chat.update only takes channel ids (not channel names)
channel = self.channel_name_to_id(channel)
params.update({
'channel': channel,
'text': text,
'ts': timestamp,
})
return self._make_request(method, params)
def _as_rdf_xml(self, ns):
"""
Return identity details for the element as XML nodes
"""
self.rdf_identity = self._get_identity(ns)
elements = []
elements.append(ET.Element(NS('sbol', 'persistentIdentity'),
attrib={NS('rdf', 'resource'):
self._get_persistent_identitity(ns)}))
if self.name is not None:
name = ET.Element(NS('dcterms', 'title'))
name.text = self.name
elements.append(name)
if self.display_id is not None:
display_id = ET.Element(NS('sbol', 'displayId'))
display_id.text = self.display_id
elements.append(display_id)
if self.version is not None:
version = ET.Element(NS('sbol', 'version'))
version.text = self.version
elements.append(version)
if self.was_derived_from is not None:
elements.append(ET.Element(NS('prov', 'wasDerivedFrom'),
attrib={NS('rdf', 'resource'): self.was_derived_from}))
if self.description is not None:
description = ET.Element(NS('dcterms', 'description'))
description.text = self.description
elements.append(description)
for a in self.annotations:
elements.append(a._as_rdf_xml(ns))
return elements | Return identity details for the element as XML nodes |
def dump(self):
"""Serialize a test case to a string."""
result = list(self.output_lines())
if self.locked:
result.append('# locked')
if self.choices:
for choice in self.choices:
result.append('# choice: ' + choice)
if self.explanation:
result.append('# explanation: ' + self.explanation)
return '\n'.join(result) | Serialize a test case to a string. |
def password_hash(password):
"""Hash the password, using bcrypt+sha256.
.. versionchanged:: 1.1.0
:param str password: Password in plaintext
:return: password hash
:rtype: str
"""
try:
return bcrypt_sha256.encrypt(password)
except TypeError:
return bcrypt_sha256.encrypt(password.decode('utf-8')) | Hash the password, using bcrypt+sha256.
.. versionchanged:: 1.1.0
:param str password: Password in plaintext
:return: password hash
:rtype: str |
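A round-trip sketch using passlib's bcrypt_sha256, which the helper above wraps; verify() is the standard passlib way to check a candidate password against the stored hash.
from passlib.hash import bcrypt_sha256

stored = password_hash("correct horse battery staple")
assert bcrypt_sha256.verify("correct horse battery staple", stored)
assert not bcrypt_sha256.verify("wrong password", stored)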
def sill(self):
""" get the sill of the GeoStruct
Return
------
sill : float
the sill of the (nested) GeoStruct, including nugget and contribution
from each variogram
"""
sill = self.nugget
for v in self.variograms:
sill += v.contribution
return sill | get the sill of the GeoStruct
Return
------
sill : float
the sill of the (nested) GeoStruct, including nugget and contribution
from each variogram |
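A worked illustration of the sill arithmetic, assuming pyemu-style GeoStruct/ExpVario constructors (the signatures here are an assumption, and the numbers are made up).
import pyemu

v1 = pyemu.geostats.ExpVario(contribution=0.8, a=1000)
v2 = pyemu.geostats.ExpVario(contribution=0.15, a=250)
gs = pyemu.geostats.GeoStruct(nugget=0.05, variograms=[v1, v2])
print(gs.sill)  # nugget + contributions: 0.05 + 0.8 + 0.15 = 1.0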
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The beginning of the window
:param int endTime: The end of the window
:returns: A list of IDs
:rtype: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
else:
# Neither bound was supplied: match every note
timeQuery = {}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs | :desc: Get all the notes in the given time window
:param int startTime: The beginning of the window
:param int endTime: The end of the window
:returns: A list of IDs
:rtype: list
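For reference, the Mongo filter built for a fully bounded window looks like the sketch below; collection and field names follow the code above.
startTime, endTime = 1500000000.0, 1500003600.0
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
                      {"timestamps": {"$lt": endTime}}]}
# Each matching document contributes only its ID:
# noteDB[collection].find(timeQuery, {"ID": 1, "_id": 0})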
def firsttime(self):
""" sets it as already done"""
self.config.set('DEFAULT', 'firsttime', 'no')
if self.cli_config.getboolean('core', 'collect_telemetry', fallback=False):
print(PRIVACY_STATEMENT)
else:
self.cli_config.set_value('core', 'collect_telemetry', ask_user_for_telemetry())
self.update() | sets it as already done |
def GetRemainder(self):
"""Method to get the remainder of the buffered XML. this
method stops the parser, set its state to End Of File and
return the input stream with what is left that the parser
did not use. The implementation is not good, the parser
certainly progressed past what's left in reader->input,
and there is an allocation problem. Best would be to
rewrite it differently. """
ret = libxml2mod.xmlTextReaderGetRemainder(self._o)
if ret is None:raise treeError('xmlTextReaderGetRemainder() failed')
__tmp = inputBuffer(_obj=ret)
return __tmp | Method to get the remainder of the buffered XML. this
method stops the parser, set its state to End Of File and
return the input stream with what is left that the parser
did not use. The implementation is not good, the parser
certainly progressed past what's left in reader->input,
and there is an allocation problem. Best would be to
rewrite it differently. |
def get_ticker(self, symbol=None):
"""Get symbol tick
https://docs.kucoin.com/#get-ticker
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
.. code:: python
all_ticks = client.get_ticker()
ticker = client.get_ticker('ETH-BTC')
:returns: ApiResponse
.. code:: python
{
"sequence": "1545825031840", # now sequence
"price": "3494.367783", # last trade price
"size": "0.05027185", # last trade size
"bestBid": "3494.367783", # best bid price
"bestBidSize": "2.60323254", # size at best bid price
"bestAsk": "3499.12", # best ask price
"bestAskSize": "0.01474011" # size at best ask price
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {}
tick_path = 'market/allTickers'
if symbol is not None:
tick_path = 'market/orderbook/level1'
data = {
'symbol': symbol
}
return self._get(tick_path, False, data=data) | Get symbol tick
https://docs.kucoin.com/#get-ticker
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
.. code:: python
all_ticks = client.get_ticker()
ticker = client.get_ticker('ETH-BTC')
:returns: ApiResponse
.. code:: python
{
"sequence": "1545825031840", # now sequence
"price": "3494.367783", # last trade price
"size": "0.05027185", # last trade size
"bestBid": "3494.367783", # best bid price
"bestBidSize": "2.60323254", # size at best bid price
"bestAsk": "3499.12", # best ask price
"bestAskSize": "0.01474011" # size at best ask price
}
:raises: KucoinResponseException, KucoinAPIException |
def _disconnect(self, mqttc, userdata, rc):
"""
The callback for when a DISCONNECT occurs.
:param mqttc: The client instance for this callback
:param userdata: The private userdata for the mqtt client. Not used in Polyglot
:param rc: Result code of connection, 0 = Graceful, anything else is unclean
"""
self.connected = False
if rc != 0:
LOGGER.info("MQTT Unexpected disconnection. Trying reconnect.")
try:
self._mqttc.reconnect()
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
LOGGER.error("MQTT Connection error: " + message)
else:
LOGGER.info("MQTT Graceful disconnection.") | The callback for when a DISCONNECT occurs.
:param mqttc: The client instance for this callback
:param userdata: The private userdata for the mqtt client. Not used in Polyglot
:param rc: Result code of connection, 0 = Graceful, anything else is unclean |
def get_requests(self):
"""Get all the new requests that were found in the response.
Returns:
list(:class:`nyawc.http.Request`): A list of new requests that were found.
"""
requests = self.derived_get_requests()
for request in requests:
request.url = URLHelper.remove_hash(request.url)
return requests | Get all the new requests that were found in the response.
Returns:
list(:class:`nyawc.http.Request`): A list of new requests that were found. |
def build_request(self, input_data=None, *args, **kwargs):
"""
Builds request
:param input_data:
:param args:
:param kwargs:
:return:
"""
if input_data is not None:
self.input_data = input_data
if self.input_data is None:
raise ValueError('Input data is None')
if self.uo is None:
raise ValueError('UO is None')
self.request = RequestHolder()
self.request.nonce = get_random_vector(EBConsts.FRESHNESS_NONCE_LEN)
self.request.api_object = EBUtils.build_api_object(self.uo)
self.request.endpoint = self.uo.resolve_endpoint()
self.request.configuration = self.configuration
self.request.api_method = EBConsts.REQUEST_PROCESS_DATA
# Build plaintext plain_buffer
plain_buffer = \
to_bytes(31, 1) + to_bytes(self.uo.uo_id, 4) + \
to_bytes(self.request.nonce, EBConsts.FRESHNESS_NONCE_LEN) + to_bytes(self.input_data)
plain_buffer = PKCS7.pad(plain_buffer)
# Encrypt-then-mac
ciphertext = aes_enc(self.uo.enc_key, plain_buffer)
mac = cbc_mac(self.uo.mac_key, ciphertext)
# Result request body
self.request.body = {
"data": "Packet0_%s_0000%s" % (EBUtils.get_request_type(self.uo), to_hex(ciphertext + mac))}
return self.request | Builds request
:param input_data:
:param args:
:param kwargs:
:return: |
def chunks(iterable, size):
"""
Splits a very large list into evenly sized chunks.
Returns an iterator of lists that are no more than the size passed in.
"""
it = iter(iterable)
item = list(islice(it, size))
while item:
yield item
item = list(islice(it, size)) | Splits a very large list into evenly sized chunks.
Returns an iterator of lists that are no more than the size passed in. |
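A quick usage example for chunks; islice comes from itertools, as the function body assumes.
from itertools import islice

print(list(chunks(range(10), 3)))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]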
def spectrum(self, function='geoid', lmax=None, unit='per_l', base=10.):
"""
Return the spectrum as a function of spherical harmonic degree.
Usage
-----
spectrum, [error_spectrum] = x.spectrum([function, lmax, unit, base])
Returns
-------
spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectrum, where lmax is the maximum
spherical harmonic degree.
error_spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the error_spectrum (if the attribute errors
is not None).
Parameters
----------
function : str, optional, default = 'geoid'
The type of power spectrum to return: 'potential' for the
gravitational potential in m2/s2, 'geoid' for the geoid in m,
'radial' for the radial gravity in m/s2, or 'total' for the total
gravitational field in m/s2.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to return.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This method returns the power spectrum of the class instance, where the
type of function is defined by the function parameter: 'potential' for
the gravitational potential, 'geoid' for the geoid, 'radial' for
the radial gravity, or 'total' for the total gravitational field. In
all cases, the total power of the function is defined as the integral
of the function squared over all space, divided by the area the
function spans. If the mean of the function is zero, this is equivalent
to the variance of the function.
The output spectrum can be expressed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitesimal logarithmic degree band. The contribution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl') is equal to
spectrum(l, 'per_l')*l*log(a).
"""
if function.lower() not in ('potential', 'geoid', 'radial', 'total'):
raise ValueError(
"function must be of type 'potential', 'geoid', 'radial', or "
"'total'. Provided value was {:s}".format(repr(function))
)
s = _spectrum(self.coeffs, normalization=self.normalization,
convention='power', unit=unit, base=base, lmax=lmax)
if self.errors is not None:
es = _spectrum(self.errors, normalization=self.normalization,
convention='power', unit=unit, base=base, lmax=lmax)
if function.lower() == 'potential':
s *= (self.gm / self.r0)**2
if self.errors is not None:
es *= (self.gm / self.r0)**2
elif function.lower() == 'geoid':
s *= self.r0**2
if self.errors is not None:
es *= self.r0**2
elif function.lower() == 'radial':
degrees = _np.arange(len(s))
s *= (self.gm * (degrees + 1) / self.r0**2)**2
if self.errors is not None:
es *= (self.gm * (degrees + 1) / self.r0**2)**2
elif function.lower() == 'total':
degrees = _np.arange(len(s))
s *= (self.gm / self.r0**2)**2 * (degrees + 1) * (2 * degrees + 1)
if self.errors is not None:
es *= (self.gm / self.r0**2)**2 * (degrees + 1) * \
(2 * degrees + 1)
if self.errors is not None:
return s, es
else:
return s | Return the spectrum as a function of spherical harmonic degree.
Usage
-----
spectrum, [error_spectrum] = x.spectrum([function, lmax, unit, base])
Returns
-------
spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectrum, where lmax is the maximum
spherical harmonic degree.
error_spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the error_spectrum (if the attribute errors
is not None).
Parameters
----------
function : str, optional, default = 'geoid'
The type of power spectrum to return: 'potential' for the
gravitational potential in m2/s2, 'geoid' for the geoid in m,
'radial' for the radial gravity in m/s2, or 'total' for the total
gravitational field in m/s2.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to return.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This method returns the power spectrum of the class instance, where the
type of function is defined by the function parameter: 'potential' for
the gravitational potential, 'geoid' for the geoid, 'radial' for
the radial gravity, or 'total' for the total gravitational field. In
all cases, the total power of the function is defined as the integral
of the function squared over all space, divided by the area the
function spans. If the mean of the function is zero, this is equivalent
to the variance of the function.
The output spectrum can be expressed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitesimal logarithmic degree band. The contribution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl') is equal to
spectrum(l, 'per_l')*l*log(a). |
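A hedged usage sketch, assuming clm is an instance of the pyshtools-style gravity-coefficient class this method belongs to, with gm, r0, and coeffs already populated.
geoid_power = clm.spectrum(function='geoid')                    # per-degree power in m**2
radial_perlm = clm.spectrum(function='radial', unit='per_lm', lmax=50)

# When an error array is attached, both spectra are returned:
# power, error_power = clm.spectrum(function='total')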
def _clean_up_columns(
self):
"""clean up columns
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
sqlQueries = [
"update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;",
"update tcs_helper_catalogue_tables_info set version_number = 'stream' where table_name like '%%stream' and version_number is null;",
"""update tcs_helper_catalogue_tables_info set in_ned = 0 where table_name like '%%stream' and in_ned is null;""",
"""update tcs_helper_catalogue_tables_info set vizier_link = 0 where table_name like '%%stream' and vizier_link is null;""",
"update tcs_helper_catalogue_views_info set old_view_name = view_name where old_view_name is null;",
]
for sqlQuery in sqlQueries:
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
# VIEW OBJECT TYPES
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and object_type is null;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
object_type = view_name.replace("tcs_view_", "").split("_")[0]
sqlQuery = u"""
update tcs_helper_catalogue_views_info set object_type = "%(object_type)s" where view_name = "%(view_name)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
# MASTER TABLE ID FOR VIEWS
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and table_id is null;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
table_name = view_name.replace("tcs_view_", "").split("_")[1:]
table_name = ("_").join(table_name)
table_name = "tcs_cat_%(table_name)s" % locals()
sqlQuery = u"""
update tcs_helper_catalogue_views_info set table_id = (select id from tcs_helper_catalogue_tables_info where table_name = "%(table_name)s") where view_name = "%(view_name)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | clean up columns
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
def lpc(blk, order=None):
"""
Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
the analysis whitening filter. This implementation uses the autocorrelation
method, using the Levinson-Durbin algorithm or Numpy pseudo-inverse for
linear system solving, when needed.
Parameters
----------
blk :
An iterable with well-defined length. Don't use this function with Stream
objects!
order :
The order of the resulting ZFilter object. Defaults to ``len(blk) - 1``.
Returns
-------
A FIR filter, as a ZFilter object. The mean squared error over the given
block is in its "error" attribute.
Hint
----
See ``lpc.kautocor`` example, which should apply equally for this strategy.
See Also
--------
levinson_durbin :
Levinson-Durbin algorithm for solving Yule-Walker equations (Toeplitz
matrix linear system).
lpc.nautocor:
LPC coefficients from linear system solved with Numpy pseudo-inverse.
lpc.kautocor:
LPC coefficients obtained with Levinson-Durbin algorithm.
"""
# None (the documented len(blk) - 1 default) and small orders use the Numpy solver
if order is None or order < 100:
return lpc.nautocor(blk, order)
try:
return lpc.kautocor(blk, order)
except ParCorError:
return lpc.nautocor(blk, order) | Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
the analysis whitening filter. This implementation uses the autocorrelation
method, using the Levinson-Durbin algorithm or Numpy pseudo-inverse for
linear system solving, when needed.
Parameters
----------
blk :
An iterable with well-defined length. Don't use this function with Stream
objects!
order :
The order of the resulting ZFilter object. Defaults to ``len(blk) - 1``.
Returns
-------
A FIR filter, as a ZFilter object. The mean squared error over the given
block is in its "error" attribute.
Hint
----
See ``lpc.kautocor`` example, which should apply equally for this strategy.
See Also
--------
levinson_durbin :
Levinson-Durbin algorithm for solving Yule-Walker equations (Toeplitz
matrix linear system).
lpc.nautocor:
LPC coefficients from linear system solved with Numpy pseudo-inverse.
lpc.kautocor:
LPC coefficients obtained with Levinson-Durbin algorithm. |
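A brief usage sketch of the dispatcher above, relying only on what the docstring states: the result is a ZFilter whose mean squared error is exposed as .error. The input block is arbitrary.
import numpy as np

blk = np.sin(2 * np.pi * 440 * np.arange(256) / 8000.0)  # one 256-sample block
analysis_filter = lpc(blk, order=8)  # small order, handled by lpc.nautocor
print(analysis_filter.error)         # mean squared error over the block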
def upgradeShare1to2(oldShare):
"Upgrader from Share version 1 to version 2."
sharedInterfaces = []
attrs = set(oldShare.sharedAttributeNames.split(u','))
for iface in implementedBy(oldShare.sharedItem.__class__):
if set(iface) == attrs or attrs == set('*'):
sharedInterfaces.append(iface)
newShare = oldShare.upgradeVersion('sharing_share', 1, 2,
shareID=oldShare.shareID,
sharedItem=oldShare.sharedItem,
sharedTo=oldShare.sharedTo,
sharedInterfaces=sharedInterfaces)
return newShare | Upgrader from Share version 1 to version 2. |
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
"""See Also: setObsoletedByResponse()
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.setObsoletedByResponse(
pid, obsoletedByPid, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setObsoletedByResponse()
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns: |
def set_roots(self, uproot_with=None):
''' Set the roots of the tree in the os environment
Parameters:
uproot_with (str):
A new TREE_DIR path used to override an existing TREE_DIR environment variable
'''
# Check for TREE_DIR
self.treedir = os.environ.get('TREE_DIR', None) if not uproot_with else uproot_with
if not self.treedir:
treefilepath = os.path.dirname(os.path.abspath(__file__))
if 'python/' in treefilepath:
self.treedir = treefilepath.rsplit('/', 2)[0]
else:
self.treedir = treefilepath
os.environ['TREE_DIR'] = self.treedir
# Check sas_base_dir
if 'SAS_BASE_DIR' in os.environ:
self.sasbasedir = os.environ["SAS_BASE_DIR"]
else:
self.sasbasedir = os.path.expanduser('~/sas')
# make the directories
if not os.path.isdir(self.sasbasedir):
os.makedirs(self.sasbasedir) | Set the roots of the tree in the os environment
Parameters:
uproot_with (str):
A new TREE_DIR path used to override an existing TREE_DIR environment variable |
def write(self, args): # pylint: disable=no-self-use
""" writes the progres """
ShellProgressView.done = False
message = args.get('message', '')
percent = args.get('percent', None)
if percent:
ShellProgressView.progress_bar = _format_value(message, percent)
if int(percent) == 1:
ShellProgressView.progress_bar = None
ShellProgressView.progress = message | writes the progress
def like_shared_file(self, sharekey=None):
""" 'Like' a SharedFile. mlkshk doesn't allow you to unlike a
sharedfile, so this is ~~permanent~~.
Args:
sharekey (str): Sharekey for the file you want to 'like'.
Returns:
Either a SharedFile on success, or an exception on error.
"""
if not sharekey:
raise Exception(
"You must specify a sharekey of the file you"
"want to 'like'.")
endpoint = '/api/sharedfile/{sharekey}/like'.format(sharekey=sharekey)
data = self._make_request("POST", endpoint=endpoint, data=None)
try:
sf = SharedFile.NewFromJSON(data)
sf.liked = True
return sf
except:
raise Exception("{0}".format(data['error'])) | 'Like' a SharedFile. mlkshk doesn't allow you to unlike a
sharedfile, so this is ~~permanent~~.
Args:
sharekey (str): Sharekey for the file you want to 'like'.
Returns:
Either a SharedFile on success, or an exception on error. |
def complete_info(self, text, line, begidx, endidx):
"""completion for info command"""
opts = self.INFO_OPTS
if not text:
completions = opts
else:
completions = [f
for f in opts
if f.startswith(text)
]
return completions | completion for info command |
def dlabfs(handle):
"""
Begin a forward segment search in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlabfs_c.html
:param handle: Handle of open DLA file.
:type handle: int
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr
"""
handle = ctypes.c_int(handle)
descr = stypes.SpiceDLADescr()
found = ctypes.c_int()
libspice.dlabfs_c(handle, ctypes.byref(descr), ctypes.byref(found))
return descr, bool(found.value) | Begin a forward segment search in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlabfs_c.html
:param handle: Handle of open DLA file.
:type handle: int
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr |
def extended_analog(self, pin, data):
"""
This method will send an extended-data analog write command
to the selected pin.
:param pin: 0 - 127
:param data: 0 - 0x4000 (14 bits)
:returns: No return value
"""
task = asyncio.ensure_future(self.core.extended_analog(pin, data))
self.loop.run_until_complete(task) | This method will send an extended-data analog write command
to the selected pin.
:param pin: 0 - 127
:param data: 0 - 0x4000 (14 bits)
:returns: No return value |
def serialize_formula(formula):
r'''Basic formula serializer to construct a consistently-formatted formula.
This is necessary for handling user-supplied formulas, which are not always
well formatted.
Performs no sanity checking that elements are actually elements.
Parameters
----------
formula : str
Formula string as parseable by the method nested_formula_parser, [-]
Returns
-------
formula : str
A consistently formatted formula to describe a molecular formula, [-]
Notes
-----
Examples
--------
>>> serialize_formula('Pd(NH3)4+3')
'H12N4Pd+3'
'''
charge = charge_from_formula(formula)
element_dict = nested_formula_parser(formula)
base = atoms_to_Hill(element_dict)
if charge == 0:
pass
elif charge > 0:
if charge == 1:
base += '+'
else:
base += '+' + str(charge)
elif charge < 0:
if charge == -1:
base += '-'
else:
base += str(charge)
return base | r'''Basic formula serializer to construct a consistently-formatted formula.
This is necessary for handling user-supplied formulas, which are not always
well formatted.
Performs no sanity checking that elements are actually elements.
Parameters
----------
formula : str
Formula string as parseable by the method nested_formula_parser, [-]
Returns
-------
formula : str
A consistently formatted formula to describe a molecular formula, [-]
Notes
-----
Examples
--------
>>> serialize_formula('Pd(NH3)4+3')
'H12N4Pd+3' |
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or :class:`list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators.
"""
if isinstance(validator, list):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator) | A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or :class:`list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators. |
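A standard attrs usage example for the validator above; this mirrors the documented attr.validators.optional behaviour.
import attr

@attr.s
class Config(object):
    timeout = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
    )

Config()            # timeout=None passes
Config(timeout=30)  # ints pass
# Config(timeout="30") would raise TypeError from instance_of(int)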
def equivalent(kls, first, second):
"""
Tests that two skeletons are the same in form not merely that
their array contents are exactly the same. This test can be
made more sophisticated.
"""
if first.empty() and second.empty():
return True
elif first.vertices.shape[0] != second.vertices.shape[0]:
return False
elif first.edges.shape[0] != second.edges.shape[0]:
return False
EPSILON = 1e-7
vertex1, inv1 = np.unique(first.vertices, axis=0, return_inverse=True)
vertex2, inv2 = np.unique(second.vertices, axis=0, return_inverse=True)
vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)
if not vertex_match:
return False
remapping = {}
for i in range(len(inv1)):
remapping[inv1[i]] = inv2[i]
remap = np.vectorize(lambda idx: remapping[idx])
edges1 = np.sort(np.unique(first.edges, axis=0), axis=1)
edges1 = edges1[np.lexsort(edges1[:,::-1].T)]
edges2 = remap(second.edges)
edges2 = np.sort(np.unique(edges2, axis=0), axis=1)
edges2 = edges2[np.lexsort(edges2[:,::-1].T)]
edges_match = np.all(edges1 == edges2)
if not edges_match:
return False
second_verts = {}
for i, vert in enumerate(second.vertices):
second_verts[tuple(vert)] = i
for i in range(len(first.radii)):
i2 = second_verts[tuple(first.vertices[i])]
if first.radii[i] != second.radii[i2]:
return False
if first.vertex_types[i] != second.vertex_types[i2]:
return False
return True | Tests that two skeletons are the same in form not merely that
their array contents are exactly the same. This test can be
made more sophisticated. |
def check_var_units(self, ds):
'''
Checks each applicable variable for the units attribute
:param netCDF4.Dataset ds: An open netCDF dataset
'''
results = []
for variable in self.get_applicable_variables(ds):
msgs = []
# Check units and dims for variable
unit_check = hasattr(ds.variables[variable], 'units')
no_dim_check = (getattr(ds.variables[variable], 'dimensions') == tuple())
# Check if we have no dimensions. If no dims, skip test
if no_dim_check:
continue
# Check if we have no units
if not unit_check:
msgs.append("units")
results.append(Result(BaseCheck.HIGH, unit_check, self._var_header.format(variable), msgs))
return results | Checks each applicable variable for the units attribute
:param netCDF4.Dataset ds: An open netCDF dataset |
def get_histogram_bins(min_, max_, std, count):
"""
Return optimal bins given the input parameters
"""
width = _get_bin_width(std, count)
count = int(round((max_ - min_) / width) + 1)
if count:
bins = [i * width + min_ for i in xrange(1, count + 1)]
else:
bins = [min_]
return bins | Return optimal bins given the input parameters |
def node_validate(node_dict, node_num, cmd_name):
"""Validate that command can be performed on target node."""
# cmd: [required-state, action-to-displayed, error-statement]
req_lu = {"run": ["stopped", "Already Running"],
"stop": ["running", "Already Stopped"],
"connect": ["running", "Can't Connect, Node Not Running"],
"details": [node_dict[node_num].state, ""]}
tm = {True: ("Node {1}{2}{0} ({5}{3}{0} on {1}{4}{0})".
format(C_NORM, C_WARN, node_num,
node_dict[node_num].name,
node_dict[node_num].cloud_disp, C_TI)),
False: req_lu[cmd_name][1]}
node_valid = bool(req_lu[cmd_name][0] == node_dict[node_num].state)
node_info = tm[node_valid]
return node_valid, node_info | Validate that command can be performed on target node. |
def NRMSE_sliding(data, pred, windowSize):
"""
Computing NRMSE in a sliding window
:param data:
:param pred:
:param windowSize:
:return: (window_center, NRMSE)
"""
halfWindowSize = int(round(float(windowSize)/2))
window_center = range(halfWindowSize, len(data)-halfWindowSize, int(round(float(halfWindowSize)/5.0)))
nrmse = []
for wc in window_center:
nrmse.append(NRMSE(data[wc-halfWindowSize:wc+halfWindowSize],
pred[wc-halfWindowSize:wc+halfWindowSize]))
return (window_center, nrmse) | Computing NRMSE in a sliding window
:param data:
:param pred:
:param windowSize:
:return: (window_center, NRMSE) |
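An illustrative call, assuming NRMSE(data, pred) is the module's normalized-RMSE helper; the signals are synthetic.
import numpy as np

t = np.arange(1000)
data = np.sin(0.02 * t)
pred = data + np.random.normal(scale=0.1, size=t.shape)  # noisy "prediction"

centers, nrmse = NRMSE_sliding(data, pred, windowSize=100)
# centers[i] is the midpoint of window i; nrmse[i] is its normalized RMSE.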