code (string, 75 to 104k characters) | docstring (string, 1 to 46.9k characters)
---|---|
def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1) | r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64 |
def saturation_equivalent_potential_temperature(pressure, temperature):
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return th_es * units.kelvin | r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available. |
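A minimal usage sketch for the function above, assuming it is wired up as in MetPy's `metpy.calc` module with pint-based units (the input values are arbitrary illustrations):

```python
# Hedged usage sketch: assumes the MetPy-style API and unit registry are available.
from metpy.calc import saturation_equivalent_potential_temperature
from metpy.units import units

theta_es = saturation_equivalent_potential_temperature(850 * units.hPa, 10 * units.degC)
print(theta_es)  # a pint Quantity in kelvin
```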
def get_config_parameter_loglevel(config: ConfigParser,
section: str,
param: str,
default: int) -> int:
"""
Get ``loglevel`` parameter from ``configparser`` ``.INI`` file, e.g.
mapping ``'debug'`` to ``logging.DEBUG``.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
"""
try:
value = config.get(section, param).lower()
if value == "debug":
return logging.DEBUG # 10
elif value == "info":
return logging.INFO
elif value in ["warn", "warning"]:
return logging.WARN
elif value == "error":
return logging.ERROR
elif value in ["critical", "fatal"]:
return logging.CRITICAL # 50
else:
raise ValueError
except (TypeError, ValueError, NoOptionError, AttributeError):
log.warning(
"Configuration variable {} not found or improper in section [{}]; "
"using default of {!r}", param, section, default)
return default | Get ``loglevel`` parameter from ``configparser`` ``.INI`` file, e.g.
mapping ``'debug'`` to ``logging.DEBUG``.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default |
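A short usage sketch for the loglevel helper; the `[logging]` section and option names below are hypothetical:

```python
# Hedged usage sketch: assumes the function and its module-level imports (log,
# NoOptionError) are available; the .INI content is made up for illustration.
import logging
from configparser import ConfigParser

config = ConfigParser()
config.read_string("[logging]\nloglevel = warning\n")
level = get_config_parameter_loglevel(config, "logging", "loglevel", logging.INFO)
assert level == logging.WARNING
```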
def decr(self, field, by=1):
""" :see::meth:RedisMap.decr """
return self._client.hincrby(self.key_prefix, field, by * -1) | :see::meth:RedisMap.decr |
def get_file_checksum(path):
"""Get the checksum of a file (using ``sum``, Unix-only).
This function is only available on certain platforms.
Parameters
----------
path: str
The path of the file.
Returns
-------
int
The checksum.
Raises
------
IOError
If the file does not exist.
"""
if not (sys.platform.startswith('linux') or \
sys.platform in ['darwin', 'cygwin']):
raise OSError('This function is not available on your platform.')
assert isinstance(path, (str, _oldstr))
if not os.path.isfile(path): # not a file
raise IOError('File "%s" does not exist.' %(path))
# calculate checksum
sub = subproc.Popen('sum "%s"' %(path), bufsize=-1, shell=True,
stdout=subproc.PIPE)
stdoutdata = sub.communicate()[0]
assert sub.returncode == 0
# in Python 3, communicate() returns bytes that need to be decoded
encoding = locale.getpreferredencoding()
stdoutstr = str(stdoutdata, encoding=encoding)
file_checksum = int(stdoutstr.split(' ')[0])
logger.debug('Checksum of file "%s": %d', path, file_checksum)
return file_checksum | Get the checksum of a file (using ``sum``, Unix-only).
This function is only available on certain platforms.
Parameters
----------
path: str
The path of the file.
Returns
-------
int
The checksum.
Raises
------
IOError
If the file does not exist. |
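A usage sketch for the checksum helper (Unix-only, since it shells out to `sum`); the temporary file is created just for the example:

```python
# Hedged usage sketch: requires a Unix-like platform with the `sum` utility on PATH.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as handle:
    handle.write("hello world\n")
print(get_file_checksum(handle.name))  # integer checksum reported by `sum`
os.remove(handle.name)
```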
def match(self, ra1, dec1, ra2, dec2, radius, maxmatch=1, convertToArray=True):
"""*Crossmatch two lists of ra/dec points*
This is very efficient for large search angles and large lists. Note, if you need to match against the same points many times, you should use a `Matcher` object
**Key Arguments:**
- ``ra1`` -- list, numpy array or single ra value (first coordinate set)
- ``dec1`` -- list, numpy array or single dec value (first coordinate set - must match ra1 array length)
- ``ra2`` -- list, numpy array or single ra value (second coordinate set)
- ``dec2`` -- list, numpy array or single dec value (second coordinate set - must match ra2 array length)
- ``radius`` -- search radius in degrees. Can be a list, numpy array or single value. If a list or numpy array, it must be the same length as the ra1 array.
- ``maxmatch`` -- maximum number of matches to return. Set to `0` to match all points. Default *1* (i.e. closest match)
- ``convertToArray`` -- convert the coordinates into an array. Default *True*. Can bypass the conversion check if you are sure the coordinates are already numpy arrays
**Return:**
- ``matchIndices1`` -- match indices for list1 (ra1, dec1)
- ``matchIndices2`` -- match indices for list2 (ra2, dec2)
- ``sepDeg`` -- separations between matched coordinates in degrees. All returned arrays are the same size
**Usage:**
To match 2 lists of coordinates, try something like this:
.. code-block:: python
twoArcsec = 2.0 / 3600.
raList1 = [200.0, 200.0, 200.0, 175.23, 21.36]
decList1 = [24.3, 24.3, 24.3, -28.25, -15.32]
raList2 = [200.0, 200.0, 200.0, 175.23, 55.25]
decList2 = [24.3 + 0.75 * twoArcsec, 24.3 + 0.25 * twoArcsec,
24.3 - 0.33 * twoArcsec, -28.25 + 0.58 * twoArcsec, 75.22]
matchIndices1, matchIndices2, seps = mesh.match(
ra1=raList1,
dec1=decList1,
ra2=raList2,
dec2=decList2,
radius=twoArcsec,
maxmatch=0
)
for m1, m2, s in zip(matchIndices1, matchIndices2, seps):
print(raList1[m1], decList1[m1], " -> ", s * 3600., " arcsec -> ", raList2[m2], decList2[m2])
Note from the print statement, you can index the arrays ``raList1``, ``decList1`` with the ``matchIndices1`` array values and ``raList2``, ``decList2`` with the ``matchIndices2`` values.
"""
# CONVERT LISTS AND SINGLE VALUES TO ARRAYS OF FLOATS
ra1 = numpy.array(ra1, dtype='f8', ndmin=1, copy=False)
dec1 = numpy.array(dec1, dtype='f8', ndmin=1, copy=False)
ra2 = numpy.array(ra2, dtype='f8', ndmin=1, copy=False)
dec2 = numpy.array(dec2, dtype='f8', ndmin=1, copy=False)
radius = numpy.array(radius, dtype='f8', ndmin=1, copy=False)
# CHECK ARRAY SIZES MATCH
if (ra1.size != dec1.size
or ra2.size != dec2.size):
stup = (ra1.size, dec1.size, ra2.size, dec2.size)
raise ValueError("ra1 must equal dec1 in size "
"and ra2 must equal dec2 in size, "
"got %d,%d and %d,%d" % stup)
if radius.size != 1 and radius.size != ra2.size:
raise ValueError("radius size (%d) != 1 and"
" != ra2,dec2 size (%d)" % (radius.size, ra2.size))
# QUICK TRIMMING IN DEC SPACE OF BOTH SETS OF ARRAYS
decMatchIndices2 = (numpy.abs(dec1[:, None] - dec2) < radius).any(0)
decMatchIndices2 = numpy.where(decMatchIndices2)[0]
ra2a = ra2[decMatchIndices2]
dec2a = dec2[decMatchIndices2]
decMatchIndices1 = (numpy.abs(dec2[:, None] - dec1) < radius).any(0)
decMatchIndices1 = numpy.where(decMatchIndices1)[0]
ra1a = ra1[decMatchIndices1]
dec1a = dec1[decMatchIndices1]
# new way using a Matcher
depth = self.depth
matcher = Matcher(
log=self.log,
depth=depth,
ra=ra2a,
dec=dec2a,
convertToArray=convertToArray)
matchIndices1, matchIndices2, seps = matcher.match(
ra=ra1a,
dec=dec1a,
radius=radius,
maxmatch=maxmatch)
matchIndices1 = decMatchIndices1[matchIndices1]
matchIndices2 = decMatchIndices2[matchIndices2]
return matchIndices1, matchIndices2, seps | *Crossmatch two lists of ra/dec points*
This is very efficient for large search angles and large lists. Note, if you need to match against the same points many times, you should use a `Matcher` object
**Key Arguments:**
- ``ra1`` -- list, numpy array or single ra value (first coordinate set)
- ``dec1`` -- list, numpy array or single dec value (first coordinate set - must match ra1 array length)
- ``ra2`` -- list, numpy array or single ra value (second coordinate set)
- ``dec2`` -- list, numpy array or single dec value (second coordinate set - must match ra2 array length)
- ``radius`` -- search radius in degrees. Can be a list, numpy array or single value. If a list or numpy array, it must be the same length as the ra1 array.
- ``maxmatch`` -- maximum number of matches to return. Set to `0` to match all points. Default *1* (i.e. closest match)
- ``convertToArray`` -- convert the coordinates into an array. Default *True*. Can bypass the conversion check if you are sure the coordinates are already numpy arrays
**Return:**
- ``matchIndices1`` -- match indices for list1 (ra1, dec1)
- ``matchIndices2`` -- match indices for list2 (ra2, dec2)
- ``sepDeg`` -- separations between matched coordinates in degrees. All returned arrays are the same size
**Usage:**
To match 2 lists of coordinates, try something like this:
.. code-block:: python
twoArcsec = 2.0 / 3600.
raList1 = [200.0, 200.0, 200.0, 175.23, 21.36]
decList1 = [24.3, 24.3, 24.3, -28.25, -15.32]
raList2 = [200.0, 200.0, 200.0, 175.23, 55.25]
decList2 = [24.3 + 0.75 * twoArcsec, 24.3 + 0.25 * twoArcsec,
24.3 - 0.33 * twoArcsec, -28.25 + 0.58 * twoArcsec, 75.22]
matchIndices1, matchIndices2, seps = mesh.match(
ra1=raList1,
dec1=decList1,
ra2=raList2,
dec2=decList2,
radius=twoArcsec,
maxmatch=0
)
for m1, m2, s in zip(matchIndices1, matchIndices2, seps):
print(raList1[m1], decList1[m1], " -> ", s * 3600., " arcsec -> ", raList2[m2], decList2[m2])
Note from the print statement, you can index the arrays ``raList1``, ``decList1`` with the ``matchIndices1`` array values and ``raList2``, ``decList2`` with the ``matchIndices2`` values. |
def add_dimension(dimension,**kwargs):
"""
Add the dimension defined into the object "dimension" to the DB
If dimension["project_id"] is None, the dimension is global; otherwise it is the property of a project.
If the dimension already exists, an exception is raised.
"""
if numpy.isscalar(dimension):
# If it is a scalar, converts to an Object
dimension = {'name': dimension}
new_dimension = Dimension()
new_dimension.name = dimension["name"]
if "description" in dimension and dimension["description"] is not None:
new_dimension.description = dimension["description"]
if "project_id" in dimension and dimension["project_id"] is not None:
new_dimension.project_id = dimension["project_id"]
# Save on DB
db.DBSession.add(new_dimension)
db.DBSession.flush()
# Load all the record
db_dimension = db.DBSession.query(Dimension).filter(Dimension.id==new_dimension.id).one()
return JSONObject(db_dimension) | Add the dimension defined into the object "dimension" to the DB
If dimension["project_id"] is None, the dimension is global; otherwise it is the property of a project.
If the dimension already exists, an exception is raised. |
def get_version(model_instance, version):
"""
Try to load from the database the object with the specified version.
:param model_instance: instance in memory
:param version: version number
:return:
"""
version_field = get_version_fieldname(model_instance)
kwargs = {'pk': model_instance.pk, version_field: version}
return model_instance.__class__.objects.get(**kwargs) | Try to load from the database the object with the specified version.
:param model_instance: instance in memory
:param version: version number
:return: |
def get_learning_path_session_for_objective_bank(self, objective_bank_id=None):
"""Gets the OsidSession associated with the learning path service
for the given objective bank.
arg: objectiveBankId (osid.id.Id): the Id of the
ObjectiveBank
return: (osid.learning.LearningPathSession) - a
LearningPathSession
raise: NotFound - no objective bank found by the given Id
raise: NullArgument - objectiveBankId is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_learning_path() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_learning_path() and
supports_visible_federation() are true
"""
if not objective_bank_id:
raise NullArgument
if not self.supports_learning_path():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
try:
session = sessions.LearningPathSession(objective_bank_id, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | Gets the OsidSession associated with the learning path service
for the given objective bank.
arg: objectiveBankId (osid.id.Id): the Id of the
ObjectiveBank
return: (osid.learning.LearningPathSession) - a
LearningPathSession
raise: NotFound - no objective bank found by the given Id
raise: NullArgument - objectiveBankId is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_learning_path() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_learning_path() and
supports_visible_federation() are true |
def fnmatch( name, pat ):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
>>> fnmatch('bar', '*' )
True
>>> fnmatch('foo/bar', '*' )
False
>>> fnmatch('foo/bar', '**' )
True
>>> fnmatch('foo/bar', '*/*' )
True
>>> fnmatch('foo/bar', '**/*' )
True
>>> fnmatch('/bar', '**/*' )
True
>>> fnmatch('/', '**' )
True
>>> fnmatch('/', '*' )
False
"""
import os
name = os.path.normcase( name )
pat = os.path.normcase( pat )
return fnmatchcase( name, pat ) | Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
>>> fnmatch('bar', '*' )
True
>>> fnmatch('foo/bar', '*' )
False
>>> fnmatch('foo/bar', '**' )
True
>>> fnmatch('foo/bar', '*/*' )
True
>>> fnmatch('foo/bar', '**/*' )
True
>>> fnmatch('/bar', '**/*' )
True
>>> fnmatch('/', '**' )
True
>>> fnmatch('/', '*' )
False |
def word_wrap(self):
"""
Read-write setting determining whether lines of text in this shape
are wrapped to fit within the shape's width. Valid values are True,
False, or None. True and False turn word wrap on and off,
respectively. Assigning None to word wrap causes any word wrap
setting to be removed from the text frame, causing it to inherit this
setting from its style hierarchy.
"""
return {
ST_TextWrappingType.SQUARE: True,
ST_TextWrappingType.NONE: False,
None: None
}[self._txBody.bodyPr.wrap] | Read-write setting determining whether lines of text in this shape
are wrapped to fit within the shape's width. Valid values are True,
False, or None. True and False turn word wrap on and off,
respectively. Assigning None to word wrap causes any word wrap
setting to be removed from the text frame, causing it to inherit this
setting from its style hierarchy. |
def add_to_inventory(self):
"""Adds host to stack inventory"""
if not self.server_attrs:
return
for addy in self.server_attrs[A.server.PUBLIC_IPS]:
self.stack.add_host(addy, self.groups, self.hostvars) | Adds host to stack inventory |
def clear(self):
"""Clears the cache."""
if self._cache is None:
return _NO_RESULTS
if self._cache is not None:
with self._cache as k:
res = [x.as_operation() for x in k.values()]
k.clear()
k.out_deque.clear()
return res | Clears the cache. |
def bind_rows(df, other, join='outer', ignore_index=False):
"""
Binds DataFrames "vertically", stacking them together. This is equivalent
to `pd.concat` with `axis=0`.
Args:
df (pandas.DataFrame): Top DataFrame (passed in via pipe).
other (pandas.DataFrame): Bottom DataFrame.
Kwargs:
join (str): One of `"outer"` or `"inner"`. Outer join will preserve
columns not present in both DataFrames, whereas inner joining will
drop them.
ignore_index (bool): Indicates whether to consider pandas indices as
part of the concatenation (defaults to `False`).
"""
df = pd.concat([df, other], join=join, ignore_index=ignore_index, axis=0)
return df | Binds DataFrames "vertically", stacking them together. This is equivalent
to `pd.concat` with `axis=0`.
Args:
df (pandas.DataFrame): Top DataFrame (passed in via pipe).
other (pandas.DataFrame): Bottom DataFrame.
Kwargs:
join (str): One of `"outer"` or `"inner"`. Outer join will preserve
columns not present in both DataFrames, whereas inner joining will
drop them.
ignore_index (bool): Indicates whether to consider pandas indices as
part of the concatenation (defaults to `False`). |
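A small sketch of calling `bind_rows` directly (outside any pipe decorator), showing how an outer join keeps columns present in only one frame:

```python
# Hedged usage sketch: plain function call with two tiny frames.
import pandas as pd

a = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
b = pd.DataFrame({"x": [3], "z": [True]})
stacked = bind_rows(a, b, join="outer", ignore_index=True)
# Columns missing from one frame ("y", "z") are kept and filled with NaN.
print(stacked)
```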
def _get_installed(self):
"""Gets a list of the file paths to repo settings files that are
being monitored by the CI server.
"""
from utility import get_json
#This is a little tricky because the data file doesn't just have a list
#of installed servers. It also manages the script's database that tracks
#the user's interactions with it.
fulldata = get_json(self.instpath, {})
if "installed" in fulldata:
return fulldata["installed"]
else:
return [] | Gets a list of the file paths to repo settings files that are
being monitored by the CI server. |
def to_file(self, f, sorted=True, relativize=True, nl=None):
"""Write a zone to a file.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@param sorted: if True, the file will be written with the
names sorted in DNSSEC order from least to greatest. Otherwise
the names will be written in whatever order they happen to have
in the zone's dictionary.
@param relativize: if True, domain names in the output will be
relativized to the zone's origin (if possible).
@type relativize: bool
@param nl: The end of line string. If not specified, the
output will use the platform's native end-of-line marker (i.e.
LF on POSIX, CRLF on Windows, CR on Macintosh).
@type nl: string or None
"""
if sys.hexversion >= 0x02030000:
# allow Unicode filenames
str_type = basestring
else:
str_type = str
if nl is None:
opts = 'w'
else:
opts = 'wb'
if isinstance(f, str_type):
f = file(f, opts)
want_close = True
else:
want_close = False
try:
if sorted:
names = self.keys()
names.sort()
else:
names = self.iterkeys()
for n in names:
l = self[n].to_text(n, origin=self.origin,
relativize=relativize)
if nl is None:
print >> f, l
else:
f.write(l)
f.write(nl)
finally:
if want_close:
f.close() | Write a zone to a file.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@param sorted: if True, the file will be written with the
names sorted in DNSSEC order from least to greatest. Otherwise
the names will be written in whatever order they happen to have
in the zone's dictionary.
@param relativize: if True, domain names in the output will be
relativized to the zone's origin (if possible).
@type relativize: bool
@param nl: The end of line string. If not specified, the
output will use the platform's native end-of-line marker (i.e.
LF on POSIX, CRLF on Windows, CR on Macintosh).
@type nl: string or None |
def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
"""
This function will add an analog value to the specified eDNA service and
tag, without an associated point status.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param value: The value associated with the above time.
:return: 0, if the data push is successful
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(szService, szPointId,
tTime, dValue)
return nRet | This function will add an analog value to the specified eDNA service and
tag, without an associated point status.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param value: The value associated with the above time.
:return: 0, if the data push is successful |
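A hypothetical call to the eDNA push function; the site.service and tag names are placeholders, and `dnaserv_dll` must already be loaded as in the module this snippet comes from:

```python
# Hedged usage sketch: "SITE.SERVICE" and the epoch time below are illustrative only.
import calendar
import datetime

utc_epoch = calendar.timegm(datetime.datetime(2017, 1, 9, 1, 46, 56).timetuple())
ret = AddAnalogShortIdRecordNoStatus("SITE.SERVICE", "ADE1CA01", utc_epoch, 42.5)
if ret != 0:
    print("data push failed with return code", ret)
```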
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt) | a GUI idle event |
def printUniqueTFAM(tfam, samples, prefix):
"""Prints a new TFAM with only unique samples.
:param tfam: a representation of a TFAM file.
:param samples: the position of the samples
:param prefix: the prefix of the output file name
:type tfam: list
:type samples: dict
:type prefix: str
"""
fileName = prefix + ".unique_samples.tfam"
try:
with open(fileName, "w") as outputFile:
for i in sorted(samples.values()):
print >>outputFile, "\t".join(tfam[i])
except IOError:
msg = "%(fileName)s: no such file" % {"fileName": fileName}
raise ProgramError(msg) | Prints a new TFAM with only unique samples.
:param tfam: a representation of a TFAM file.
:param samples: the position of the samples
:param prefix: the prefix of the output file name
:type tfam: list
:type samples: dict
:type prefix: str |
def wait_for(self, timeout=None, **kwargs):
"""Wait for a specific matching message or timeout.
You specify the message by passing name=value keyword arguments to
this method. The first message received after this function has been
called that has all of the given keys with the given values will be
returned when this function is awaited.
If no matching message is received within the specified timeout (if
given), then asyncio.TimeoutError will be raised.
This function only matches a single message and removes itself once
the message is seen or the timeout expires.
Args:
timeout (float): Optional timeout, defaults to None for no timeout.
**kwargs: Keys to match in the message with their corresponding values.
You must pass at least one keyword argument so there is something
to look for.
Returns:
awaitable: The response
"""
if len(kwargs) == 0:
raise ArgumentError("You must specify at least one message field to wait on")
spec = MessageSpec(**kwargs)
future = self._add_waiter(spec)
future.add_done_callback(lambda x: self._remove_waiter(spec, future))
return asyncio.wait_for(future, timeout=timeout) | Wait for a specific matching message or timeout.
You specify the message by passing name=value keyword arguments to
this method. The first message received after this function has been
called that has all of the given keys with the given values will be
returned when this function is awaited.
If no matching message is received within the specified timeout (if
given), then asyncio.TimeoutError will be raised.
This function only matches a single message and removes itself once
the message is seen or the timeout expires.
Args:
timeout (float): Optional timeout, defaults to None for no timeout.
**kwargs: Keys to match in the message with their corresponding values.
You must pass at least one keyword argument so there is something
to look for.
Returns:
awaitable: The response |
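A sketch of awaiting `wait_for` from a coroutine; `client` stands in for an instance of the class defining the method, and the `type="ack"` field is an assumed message key:

```python
# Hedged usage sketch: the message fields and the client object are hypothetical.
import asyncio

async def get_ack(client):
    try:
        return await client.wait_for(timeout=5.0, type="ack")
    except asyncio.TimeoutError:
        return None
```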
def ast_to_html(self, ast, link_resolver):
"""
See the documentation of `to_ast` for
more information.
Args:
ast: PyCapsule, a capsule as returned by `to_ast`
link_resolver: hotdoc.core.links.LinkResolver, a link
resolver instance.
"""
out, _ = cmark.ast_to_html(ast, link_resolver)
return out | See the documentation of `to_ast` for
more information.
Args:
ast: PyCapsule, a capsule as returned by `to_ast`
link_resolver: hotdoc.core.links.LinkResolver, a link
resolver instance. |
def cache_status(db, aid, force=False):
"""Calculate and cache status for given anime.
Don't do anything if status already exists and force is False.
"""
with db:
cur = db.cursor()
if not force:
# We don't do anything if we already have this aid in our
# cache.
cur.execute('SELECT 1 FROM cache_anime WHERE aid=?', (aid,))
if cur.fetchone() is not None:
return
# Retrieve information for determining complete.
cur.execute(
'SELECT episodecount, enddate FROM anime WHERE aid=?', (aid,))
row = cur.fetchone()
if row is None:
raise ValueError('aid provided does not exist')
episodecount, enddate = row
# Select all regular episodes in ascending order.
cur.execute("""
SELECT number, user_watched FROM episode
WHERE aid=? AND type=?
ORDER BY number ASC
""", (aid, get_eptype(db, 'regular').id))
# We find the last consecutive episode that is user_watched.
number = 0
for number, watched in cur:
# Once we find the first unwatched episode, we set the last
# consecutive watched episode to the previous episode (or 0).
if watched == 0:
number -= 1
break
# We store this in the cache.
set_status(db, aid, enddate and episodecount <= number, number) | Calculate and cache status for given anime.
Don't do anything if status already exists and force is False. |
def set_experiment_winner(experiment):
"""Mark an alternative as the winner of the experiment."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
alternative_name = request.form.get('alternative')
alternative = Alternative(redis, alternative_name, experiment.name)
if alternative.name in experiment.alternative_names:
experiment.winner = alternative.name
return redirect(url_for('.index')) | Mark an alternative as the winner of the experiment. |
def cov_from_scales(self, scales):
"""Return a covariance matrix built from a dictionary of scales.
`scales` is a dictionary keyed by stochastic instances, and the
values are the variance of the jump distribution for each
stochastic. If a stochastic is a sequence, the variance must
have the same length.
"""
# Get array of scales
ord_sc = []
for stochastic in self.stochastics:
ord_sc.append(np.ravel(scales[stochastic]))
ord_sc = np.concatenate(ord_sc)
if np.squeeze(ord_sc).shape[0] != self.dim:
raise ValueError("Improper initial scales, dimension don't match",
(np.squeeze(ord_sc), self.dim))
# Scale identity matrix
return np.eye(self.dim) * ord_sc | Return a covariance matrix built from a dictionary of scales.
`scales` is a dictionary keyed by stochastic instances, and the
values are the variance of the jump distribution for each
stochastic. If a stochastic is a sequence, the variance must
have the same length. |
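An illustration of the final step only, with made-up scale values: the flattened per-stochastic variances end up on the diagonal of an identity matrix:

```python
# Sketch of the diagonal construction; the scale values are invented for illustration.
import numpy as np

ord_sc = np.concatenate([np.ravel([0.5]), np.ravel([1.0, 2.0])])  # dim == 3
cov = np.eye(3) * ord_sc
assert np.allclose(np.diag(cov), [0.5, 1.0, 2.0])  # off-diagonal terms are zero
```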
def _compute_fluxes(self):
"""
Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
Pixels in a square section enclosing circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller.
"""
# Compute limits of square array that encloses circle.
sma = self.sample.geometry.sma
x0 = self.sample.geometry.x0
y0 = self.sample.geometry.y0
xsize = self.sample.image.shape[1]
ysize = self.sample.image.shape[0]
imin = max(0, int(x0 - sma - 0.5) - 1)
jmin = max(0, int(y0 - sma - 0.5) - 1)
imax = min(xsize, int(x0 + sma + 0.5) + 1)
jmax = min(ysize, int(y0 + sma + 0.5) + 1)
# Integrate
if (jmax-jmin > 1) and (imax-imin) > 1:
y, x = np.mgrid[jmin:jmax, imin:imax]
radius, angle = self.sample.geometry.to_polar(x, y)
radius_e = self.sample.geometry.radius(angle)
midx = (radius <= sma)
values = self.sample.image[y[midx], x[midx]]
tflux_c = np.ma.sum(values)
npix_c = np.ma.count(values)
midx2 = (radius <= radius_e)
values = self.sample.image[y[midx2], x[midx2]]
tflux_e = np.ma.sum(values)
npix_e = np.ma.count(values)
else:
tflux_e = 0.
tflux_c = 0.
npix_e = 0
npix_c = 0
return tflux_e, tflux_c, npix_e, npix_c | Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
Pixels in a square section enclosing circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller. |
def get_inventory(self):
""" Request the api endpoint to retrieve information about the inventory
:return: Main Collection
:rtype: Collection
"""
if self._inventory is not None:
return self._inventory
self._inventory = self.resolver.getMetadata()
return self._inventory | Request the api endpoint to retrieve information about the inventory
:return: Main Collection
:rtype: Collection |
def _parse(cls, scope):
"""Parses the input scope into a normalized set of strings.
:param scope: A string or tuple containing zero or more scope names.
:return: A set of scope name strings, or a tuple with the default scope name.
:rtype: set
"""
if not scope:
return ('default',)
if isinstance(scope, string_types):
scope = scope.split(' ')
scope = {str(s).lower() for s in scope if s}
return scope or ('default',) | Parses the input scope into a normalized set of strings.
:param scope: A string or tuple containing zero or more scope names.
:return: A set of scope name strings, or a tuple with the default scope name.
:rtype: set |
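A quick illustration of the normalization behaviour; the owning class is called `Scope` here purely for the example:

```python
# Hedged sketch: `Scope` is a hypothetical name for the class defining _parse.
assert Scope._parse(None) == ('default',)
assert Scope._parse('Read Write') == {'read', 'write'}
assert Scope._parse(('Admin', '')) == {'admin'}
```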
def api_notifications():
"""Receive MTurk REST notifications."""
event_type = request.values['Event.1.EventType']
assignment_id = request.values['Event.1.AssignmentId']
# Add the notification to the queue.
db.logger.debug('rq: Queueing %s with id: %s for worker_function',
event_type, assignment_id)
q.enqueue(worker_function, event_type, assignment_id, None)
db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q),
', '.join(q.job_ids))
return success_response(request_type="notification") | Receive MTurk REST notifications. |
def info(self, correlation_id, message, *args, **kwargs):
"""
Logs an important information message
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param message: a human-readable message to log.
:param args: arguments to parameterize the message.
:param kwargs: arguments to parameterize the message.
"""
self._format_and_write(LogLevel.Info, correlation_id, None, message, args, kwargs) | Logs an important information message
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param message: a human-readable message to log.
:param args: arguments to parameterize the message.
:param kwargs: arguments to parameterize the message. |
def morph(clm1, clm2, t, lmax):
"""Interpolate linearly the two sets of sph harm. coeeficients."""
clm = (1 - t) * clm1 + t * clm2
grid_reco = clm.expand(lmax=lmax) # cut "high frequency" components
agrid_reco = grid_reco.to_array()
pts = []
for i, longs in enumerate(agrid_reco):
ilat = grid_reco.lats()[i]
for j, value in enumerate(longs):
ilong = grid_reco.lons()[j]
th = (90 - ilat) / 57.3
ph = ilong / 57.3
r = value + rbias
p = np.array([sin(th) * cos(ph), sin(th) * sin(ph), cos(th)]) * r
pts.append(p)
return pts | Linearly interpolate between the two sets of spherical harmonic coefficients. |
def transform_doc_comments(text):
"""
Parse XML content for references and other syntax.
This avoids an LXML dependency, we only need to parse out a small subset
of elements here. Iterate over string to reduce regex pattern complexity
and make substitutions easier
.. seealso::
`Doc comment reference <https://msdn.microsoft.com/en-us/library/5ast78ax.aspx>`
Reference on XML documentation comment syntax
"""
try:
while True:
found = DOC_COMMENT_SEE_PATTERN.search(text)
if found is None:
break
ref = found.group("attr_value").replace("<", "\\<").replace("`", "\\`")
reftype = "any"
replacement = ""
# Given the pattern of `\w:\w+`, inspect first letter of
# reference for identity type
if ref[1] == ":" and ref[0] in DOC_COMMENT_IDENTITIES:
reftype = DOC_COMMENT_IDENTITIES[ref[:1]]
ref = ref[2:]
replacement = ":{reftype}:`{ref}`".format(reftype=reftype, ref=ref)
elif ref[:2] == "!:":
replacement = ref[2:]
else:
replacement = ":any:`{ref}`".format(ref=ref)
# Escape following text
text_end = text[found.end() :]
text_start = text[: found.start()]
text_end = re.sub(r"^(\S)", r"\\\1", text_end)
text_start = re.sub(r"(\S)$", r"\1 ", text_start)
text = "".join([text_start, replacement, text_end])
while True:
found = DOC_COMMENT_PARAM_PATTERN.search(text)
if found is None:
break
# Escape following text
text_end = text[found.end() :]
text_start = text[: found.start()]
text_end = re.sub(r"^(\S)", r"\\\1", text_end)
text_start = re.sub(r"(\S)$", r"\1 ", text_start)
text = "".join(
[text_start, "``", found.group("attr_value"), "``", text_end]
)
except TypeError:
pass
return text | Parse XML content for references and other syntax.
This avoids an LXML dependency, we only need to parse out a small subset
of elements here. Iterate over string to reduce regex pattern complexity
and make substitutions easier
.. seealso::
`Doc comment reference <https://msdn.microsoft.com/en-us/library/5ast78ax.aspx>`
Reference on XML documentation comment syntax |
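A hedged illustration of the intended transformation; the exact Sphinx roles produced depend on the module-level `DOC_COMMENT_*` patterns and the `DOC_COMMENT_IDENTITIES` map, which are not shown in the snippet:

```python
# Hypothetical input: a <see cref="..."/> reference becomes a Sphinx cross-reference
# role and a <paramref .../> becomes an inline literal; the exact output depends on
# the regex patterns and identity map defined alongside this function.
xml = 'Gets a <see cref="T:System.String" /> value for <paramref name="count" />.'
print(transform_doc_comments(xml))
```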
def chmod(self, path, mode, follow_symlinks=True):
"""Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
follow_symlinks: If `False` and `path` points to a symlink,
the link itself is affected instead of the linked object.
"""
try:
file_object = self.resolve(path, follow_symlinks, allow_fd=True)
except IOError as io_error:
if io_error.errno == errno.ENOENT:
self.raise_os_error(errno.ENOENT, path)
raise
if self.is_windows_fs:
if mode & PERM_WRITE:
file_object.st_mode = file_object.st_mode | 0o222
else:
file_object.st_mode = file_object.st_mode & 0o777555
else:
file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
(mode & PERM_ALL))
file_object.st_ctime = time.time() | Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
follow_symlinks: If `False` and `path` points to a symlink,
the link itself is affected instead of the linked object. |
def parse_variant_id(chrom, pos, ref, alt, variant_type):
"""Parse the variant id for a variant
variant_id is used to identify variants within a certain type of
analysis. It is not human readable since it is a md5 key.
Args:
chrom(str)
pos(str)
ref(str)
alt(str)
variant_type(str): 'clinical' or 'research'
Returns:
variant_id(str): The variant id converted to md5 string
"""
return generate_md5_key([chrom, pos, ref, alt, variant_type]) | Parse the variant id for a variant
variant_id is used to identify variants within a certain type of
analysis. It is not human readable since it is a md5 key.
Args:
chrom(str)
pos(str)
ref(str)
alt(str)
variant_type(str): 'clinical' or 'research'
Returns:
variant_id(str): The variant id converted to md5 string |
def ucc_circuit(theta):
"""
Implements
exp(-i theta X_{0}Y_{1})
:param theta: rotation parameter
:return: pyquil.Program
"""
generator = sX(0) * sY(1)
initial_prog = Program().inst(X(1), X(0))
# compiled program
program = initial_prog + exponentiate(float(theta) * generator) # float is required because pyquil has weird casting behavior
return program | Implements
exp(-i theta X_{0}Y_{1})
:param theta: rotation parameter
:return: pyquil.Program |
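A usage sketch for the circuit builder, assuming pyquil is installed and `sX`, `sY`, `X`, `Program` and `exponentiate` are imported at module level exactly as the function expects:

```python
# Hedged usage sketch: prints the Quil program that prepares |11> on qubits 0 and 1
# and then applies exp(-i * pi/4 * X0 Y1).
import numpy as np

prog = ucc_circuit(np.pi / 4)
print(prog)
```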
def add_func_edges(dsp, fun_id, nodes_bunch, edge_weights=None, input=True,
data_nodes=None):
"""
Adds function node edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param fun_id:
Function node id.
:type fun_id: str
:param nodes_bunch:
A container of nodes which will be iterated through once.
:type nodes_bunch: iterable
:param edge_weights:
Edge weights.
:type edge_weights: dict, optional
:param input:
If True the nodes_bunch are input nodes, otherwise are output nodes.
:type input: bool, optional
:param data_nodes:
Data nodes to be deleted if something fail.
:type data_nodes: list
:return:
List of new data nodes.
:rtype: list
"""
# Namespace shortcut for speed.
add_edge = _add_edge_dmap_fun(dsp.dmap, edge_weights)
node, add_data = dsp.dmap.nodes, dsp.add_data
remove_nodes = dsp.dmap.remove_nodes_from
# Define an error message.
msg = 'Invalid %sput id: {} is not a data node' % ['out', 'in'][input]
i, j = ('i', 'o') if input else ('o', 'i')
data_nodes = data_nodes or [] # Update data nodes.
for u in nodes_bunch: # Iterate nodes.
try:
if node[u]['type'] != 'data': # The node is not a data node.
data_nodes.append(fun_id) # Add function id to be removed.
remove_nodes(data_nodes) # Remove function and new data nodes.
raise ValueError(msg.format(u)) # Raise error.
except KeyError:
data_nodes.append(add_data(data_id=u)) # Add new data node.
add_edge(**{i: u, j: fun_id, 'w': u}) # Add edge.
return data_nodes | Adds function node edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param fun_id:
Function node id.
:type fun_id: str
:param nodes_bunch:
A container of nodes which will be iterated through once.
:type nodes_bunch: iterable
:param edge_weights:
Edge weights.
:type edge_weights: dict, optional
:param input:
If True the nodes_bunch are input nodes, otherwise are output nodes.
:type input: bool, optional
:param data_nodes:
Data nodes to be deleted if something fail.
:type data_nodes: list
:return:
List of new data nodes.
:rtype: list |
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units | Insert the LCL pressure into the profile. |
def instantiate(self, **extra_args):
""" Instantiate the model """
input_block = self.input_block.instantiate()
backbone = self.backbone.instantiate(**extra_args)
return StochasticPolicyModel(input_block, backbone, extra_args['action_space']) | Instantiate the model |
def iterstruct(self):
"""Yield data structures built from the JSON header specifications in a table"""
from rowgenerators.rowpipe.json import add_to_struct
json_headers = self.json_headers
for row in islice(self, 1, None): # islice skips header
d = {}
for pos, jh in json_headers:
add_to_struct(d, jh, row[pos])
yield d | Yield data structures built from the JSON header specifications in a table |
def channel2int(channel):
"""Try to convert the channel to an integer.
:param channel:
Channel string (e.g. can0, CAN1) or integer
:returns: Channel integer or `None` if unsuccessful
:rtype: int
"""
if channel is None:
return None
if isinstance(channel, int):
return channel
# String and byte objects have a lower() method
if hasattr(channel, "lower"):
match = re.match(r'.*(\d+)$', channel)
if match:
return int(match.group(1))
return None | Try to convert the channel to an integer.
:param channel:
Channel string (e.g. can0, CAN1) or integer
:returns: Channel integer or `None` if unsuccessful
:rtype: int |
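A few concrete cases showing the conversion behaviour of `channel2int`:

```python
# Illustrative checks; no dependencies beyond the function itself.
assert channel2int("can0") == 0
assert channel2int("CAN1") == 1
assert channel2int(42) == 42
assert channel2int("vcan") is None   # no trailing digits
assert channel2int(None) is None
```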
def z_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the z axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix for a rotation of theta about the z axis.
"""
R = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
return R | Generates a 3x3 rotation matrix for a rotation of angle
theta about the z axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix for a rotation of theta about the z axis. |
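A quick numerical check of the rotation matrix: rotating the x unit vector by 90 degrees about z yields the y unit vector:

```python
import numpy as np

R = z_axis_rotation(np.pi / 2)
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
```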
def parsemeta(metadataloc):
"""Parses the metadata from a Landsat image bundle.
Arguments:
metadataloc: a filename or a directory.
Returns metadata dictionary
"""
# filename or directory? if several fit, use first one and warn
if os.path.isdir(metadataloc):
metalist = glob.glob(os.path.join(metadataloc, METAPATTERN))
if not metalist:
raise MTLParseError(
"No files matching metadata file pattern in directory %s."
% metadataloc)
elif len(metalist) > 0:
metadatafn = metalist[0]
filehandle = open(metadatafn, 'r')
if len(metalist) > 1:
logging.warning(
"More than one file in directory match metadata "
+ "file pattern. Using %s." % metadatafn)
elif os.path.isfile(metadataloc):
metadatafn = metadataloc
filehandle = open(metadatafn, 'r')
logging.info("Using file %s." % metadatafn)
elif 'L1_METADATA_FILE' in metadataloc:
filehandle = StringIO(metadataloc)
else:
raise MTLParseError(
"File location %s is unavailable " % metadataloc
+ "or doesn't contain a suitable metadata file.")
# Reading file line by line and inserting data into metadata dictionary
status = 0
metadata = {}
grouppath = []
dictpath = [metadata]
for line in filehandle:
if status == 4:
# we reached the end in the previous iteration,
# but are still reading lines
logging.warning(
"Metadata file %s appears to " % metadatafn
+ "have extra lines after the end of the metadata. "
+ "This is probably, but not necessarily, harmless.")
status = _checkstatus(status, line)
grouppath, dictpath = _transstat(status, grouppath, dictpath, line)
return metadata | Parses the metadata from a Landsat image bundle.
Arguments:
metadataloc: a filename or a directory.
Returns metadata dictionary |
def get_file_language(self, file):
"""
Returns the language of given file.
:param file: File to get language of.
:type file: unicode
:return: File language.
:rtype: Language
"""
for language in self.__languages:
if re.search(language.extensions, file):
LOGGER.debug("> '{0}' file detected language: '{1}'.".format(file, language.name))
return language | Returns the language of given file.
:param file: File to get language of.
:type file: unicode
:return: File language.
:rtype: Language |
def set_col_width(self, col, tab, width):
"""Sets column width"""
try:
old_width = self.col_widths.pop((col, tab))
except KeyError:
old_width = None
if width is not None:
self.col_widths[(col, tab)] = float(width) | Sets column width |
def _set_and_filter(self):
"""Filters the registered updates and sort out what is not needed
This method filters out the formatoptions that have not changed, sets
the new value and returns an iterable that is sorted by the priority
(highest priority comes first) and dependencies
Returns
-------
list
list of :class:`Formatoption` objects that have to be updated"""
fmtos = []
seen = set()
for key in self._force:
self._registered_updates.setdefault(key, getattr(self, key).value)
for key, value in chain(
six.iteritems(self._registered_updates),
six.iteritems(
{key: getattr(self, key).default for key in self})
if self._todefault else ()):
if key in seen:
continue
seen.add(key)
fmto = getattr(self, key)
# if the key is shared, a warning will be printed as long as
# this plotter is not also updating (for example due to a whole
# project update)
if key in self._shared and key not in self._force:
if not self._shared[key].plotter._updating:
warn(("%s formatoption is shared with another plotter."
" Use the unshare method to enable the updating") % (
fmto.key),
logger=self.logger)
changed = False
else:
try:
changed = fmto.check_and_set(
value, todefault=self._todefault,
validate=not self.no_validation)
except Exception as e:
self._registered_updates.pop(key, None)
self.logger.debug('Failed to set %s', key)
raise e
changed = changed or key in self._force
if changed:
fmtos.append(fmto)
fmtos = self._insert_additionals(fmtos, seen)
for fmto in fmtos:
fmto.lock.acquire()
self._todefault = False
self._registered_updates.clear()
self._force.clear()
return fmtos | Filters the registered updates and sorts out what is not needed
This method filters out the formatoptions that have not changed, sets
the new value and returns an iterable that is sorted by the priority
(highest priority comes first) and dependencies
Returns
-------
list
list of :class:`Formatoption` objects that have to be updated |
def get_or_create(cls, name):
"""
Return the instance of the class with the specified name. If it doesn't
already exist, create it.
"""
obj = cls.query.filter_by(name=name).one_or_none()
if obj:
return obj
try:
with session.begin_nested():
obj = cls(name=name)
session.add(obj)
session.flush()
return obj
except IntegrityError:
log.debug('Collision when adding %s(name="%s"), returning existing object',
cls.__name__, name)
return cls.query.filter_by(name=name).one() | Return the instance of the class with the specified name. If it doesn't
already exist, create it. |
def _set_motion_detection(self, enable):
"""Set desired motion detection state on camera"""
url = ('%s/ISAPI/System/Video/inputs/'
'channels/1/motionDetection') % self.root_url
enabled = self._motion_detection_xml.find(self.element_query('enabled'))
if enabled is None:
_LOGGING.error("Couldn't find 'enabled' in the xml")
_LOGGING.error('XML: %s', ET.tostring(self._motion_detection_xml))
return
enabled.text = 'true' if enable else 'false'
xml = ET.tostring(self._motion_detection_xml)
try:
response = self.hik_request.put(url, data=xml, timeout=CONNECT_TIMEOUT)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to set MotionDetection, error: %s', err)
return
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
return
if response.status_code != requests.codes.ok:
# If we didn't receive 200, abort
_LOGGING.error('Unable to set motion detection: %s', response.text)
self.motion_detection = enable | Set desired motion detection state on camera |
def page_missing(request, page_name, revision_requested, protected=False):
"""Displayed if page or revision does not exist."""
return Response(
generate_template(
"page_missing.html",
page_name=page_name,
revision_requested=revision_requested,
protected=protected,
),
status=404,
) | Displayed if page or revision does not exist. |
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0,
pageant_dir_fn=None):
"""
Load in Pageant CoverageDepth results with Ensembl loci.
coverage_path is a path to Pageant CoverageDepth output directory, with
one subdirectory per patient and a `cdf.csv` file inside each patient subdir.
If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate
joint tumor/normal coverage.
pageant_dir_fn is a function that takes in a Patient and produces a Pageant
dir name.
Last tested with Pageant CoverageDepth version 1ca9ed2.
"""
# Function to grab the pageant file name using the Patient
if pageant_dir_fn is None:
pageant_dir_fn = lambda patient: patient.id
columns_both = [
"depth1", # Normal
"depth2", # Tumor
"onBP1",
"onBP2",
"numOnLoci",
"fracBPOn1",
"fracBPOn2",
"fracLociOn",
"offBP1",
"offBP2",
"numOffLoci",
"fracBPOff1",
"fracBPOff2",
"fracLociOff",
]
columns_single = [
"depth",
"onBP",
"numOnLoci",
"fracBPOn",
"fracLociOn",
"offBP",
"numOffLoci",
"fracBPOff",
"fracLociOff"
]
if min_normal_depth < 0:
raise ValueError("min_normal_depth must be >= 0")
use_tumor_only = (min_normal_depth == 0)
columns = columns_single if use_tumor_only else columns_both
ensembl_loci_dfs = []
for patient in cohort:
patient_ensembl_loci_df = pd.read_csv(
path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"),
names=columns,
header=1)
# pylint: disable=no-member
# pylint gets confused by read_csv
if use_tumor_only:
depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth)
else:
depth_mask = (
(patient_ensembl_loci_df.depth1 == min_normal_depth) &
(patient_ensembl_loci_df.depth2 == min_tumor_depth))
patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask]
assert len(patient_ensembl_loci_df) == 1, (
"Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format(
min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient))
patient_ensembl_loci_df["patient_id"] = patient.id
ensembl_loci_dfs.append(patient_ensembl_loci_df)
ensembl_loci_df = pd.concat(ensembl_loci_dfs)
ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0
return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]] | Load in Pageant CoverageDepth results with Ensembl loci.
coverage_path is a path to Pageant CoverageDepth output directory, with
one subdirectory per patient and a `cdf.csv` file inside each patient subdir.
If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate
joint tumor/normal coverage.
pageant_dir_fn is a function that takes in a Patient and produces a Pageant
dir name.
Last tested with Pageant CoverageDepth version 1ca9ed2. |
def Match(self, artifact=None, os_name=None, cpe=None, label=None):
"""Test if host data should trigger a check.
Args:
artifact: An artifact name.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of conditions that match.
"""
return [
c for c in self.conditions if c.Match(artifact, os_name, cpe, label)
] | Test if host data should trigger a check.
Args:
artifact: An artifact name.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of conditions that match. |
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data) | Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not |
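A usage sketch assuming the method is exposed on a SpellChecker-style object, as in the pyspellchecker package; the output filename is arbitrary:

```python
# Hedged usage sketch: exports the loaded word-frequency dictionary as gzipped JSON.
from spellchecker import SpellChecker

spell = SpellChecker()
spell.export("word_frequency.json.gz", gzipped=True)
```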
def app_versions(self):
"""List of the versions of the internal KE-chain 'app' modules."""
if not self._app_versions:
app_versions_url = self._build_url('versions')
response = self._request('GET', app_versions_url)
if response.status_code == requests.codes.not_found:
self._app_versions = []
elif response.status_code == requests.codes.forbidden:
raise ForbiddenError(response.json()['results'][0]['detail'])
elif response.status_code != requests.codes.ok:
raise APIError("Could not retrieve app versions: {}".format(response))
else:
self._app_versions = response.json().get('results')
return self._app_versions | List of the versions of the internal KE-chain 'app' modules. |
def update_progress(opts, progress, progress_iter, out):
'''
Update the progress iterator for the given outputter
'''
# Look up the outputter
try:
progress_outputter = salt.loader.outputters(opts)[out]
except KeyError: # Outputter is not loaded
log.warning('Progress outputter not available.')
return False
progress_outputter(progress, progress_iter) | Update the progress iterator for the given outputter |
def summed_probabilities(self, choosers, alternatives):
"""
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
"""
if len(alternatives) == 0 or len(choosers) == 0:
return pd.Series()
logger.debug(
'start: calculate summed probabilities in LCM group {}'.format(
self.name))
probs = []
for name, df in self._iter_groups(choosers):
probs.append(
self.models[name].summed_probabilities(df, alternatives))
add = tz.curry(pd.Series.add, fill_value=0)
probs = tz.reduce(add, probs)
logger.debug(
'finish: calculate summed probabilities in LCM group {}'.format(
self.name))
return probs | Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together. |
def vsound(H):
"""Speed of sound"""
T = temperature(H)
a = np.sqrt(gamma * R * T)
return a | Speed of sound |
def open_tablebase(directory: PathLike, *, libgtb: Any = None, LibraryLoader: Any = ctypes.cdll) -> Union[NativeTablebase, PythonTablebase]:
"""
Opens a collection of tables for probing.
First native access via the shared library libgtb is tried. You can
optionally provide a specific library name or a library loader.
The shared library has global state and caches, so only one instance can
be open at a time.
Second, pure Python probing code is tried.
"""
try:
if LibraryLoader:
return open_tablebase_native(directory, libgtb=libgtb, LibraryLoader=LibraryLoader)
except (OSError, RuntimeError) as err:
LOGGER.info("Falling back to pure Python tablebase: %r", err)
tables = PythonTablebase()
tables.add_directory(directory)
return tables | Opens a collection of tables for probing.
First, native access via the shared library libgtb is tried. You can
optionally provide a specific library name or a library loader.
The shared library has global state and caches, so only one instance can
be open at a time.
Second, pure Python probing code is tried. |
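A hedged usage example, assuming the python-chess Gaviota module layout (chess.gaviota) that this function appears to come from; the tablebase directory path and position are illustrative and the table files must exist locally.

import chess
import chess.gaviota

# Hypothetical local path containing Gaviota table files.
with chess.gaviota.open_tablebase("data/gaviota") as tables:
    board = chess.Board("4k3/8/8/8/8/8/8/4KQ2 w - - 0 1")  # simple KQ vs K position
    print(tables.probe_dtm(board))  # distance to mate for the side to move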
def get_phonetic_info(self, lang):
"""For a specified language (lang), it returns the matrix and the vecto
containing specifications of the characters.
"""
phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data
phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors
return phonetic_data, phonetic_vectors | For a specified language (lang), it returns the matrix and the vector
containing specifications of the characters. |
def keys_to_datetime(obj, *keys):
""" Converts all the keys in an object to DateTime instances.
Args:
obj (dict): the JSON-like ``dict`` object to modify inplace.
keys (str): keys of the object being converted into DateTime
instances.
Returns:
dict: ``obj`` inplace.
>>> keys_to_datetime(None) is None
True
>>> keys_to_datetime({})
{}
>>> a = {}
>>> id(keys_to_datetime(a)) == id(a)
True
>>> a = {'one': '2016-06-06T19:41:43.039284',
...      'two': '2016-06-06T19:41:43.039284'}
>>> keys_to_datetime(a) == a
True
>>> keys_to_datetime(a, 'one')['one']
datetime.datetime(2016, 6, 6, 19, 41, 43, 39284)
>>> keys_to_datetime(a, 'one')['two']
'2016-06-06T19:41:43.039284'
"""
if not keys:
return obj
for k in keys:
if k not in obj:
continue
v = obj[k]
if not isinstance(v, string_types):
continue
obj[k] = parse_datetime(v)
return obj | Converts all the keys in an object to DateTime instances.
Args:
obj (dict): the JSON-like ``dict`` object to modify inplace.
keys (str): keys of the object being converted into DateTime
instances.
Returns:
dict: ``obj`` inplace.
>>> keys_to_datetime(None) is None
True
>>> keys_to_datetime({})
{}
>>> a = {}
>>> id(keys_to_datetime(a)) == id(a)
True
>>> a = {'one': '2016-06-06T19:41:43.039284',
...      'two': '2016-06-06T19:41:43.039284'}
>>> keys_to_datetime(a) == a
True
>>> keys_to_datetime(a, 'one')['one']
datetime.datetime(2016, 6, 6, 19, 41, 43, 39284)
>>> keys_to_datetime(a, 'one')['two']
'2016-06-06T19:41:43.039284' |
def get_ready_user_tasks(self):
"""
Returns a list of User Tasks that are READY for user action
"""
return [t for t in self.get_tasks(Task.READY)
if not self._is_engine_task(t.task_spec)] | Returns a list of User Tasks that are READY for user action |
def create_shepherd_tour(self, name=None, theme=None):
""" Creates a Shepherd JS website tour.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
theme - Sets the default theme for the tour.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("light" is used if None is selected.)
"""
shepherd_theme = "shepherd-theme-arrows"
if theme:
if theme.lower() == "default":
shepherd_theme = "shepherd-theme-default"
elif theme.lower() == "dark":
shepherd_theme = "shepherd-theme-dark"
elif theme.lower() == "light":
shepherd_theme = "shepherd-theme-arrows"
elif theme.lower() == "arrows":
shepherd_theme = "shepherd-theme-arrows"
elif theme.lower() == "square":
shepherd_theme = "shepherd-theme-square"
elif theme.lower() == "square-dark":
shepherd_theme = "shepherd-theme-square-dark"
if not name:
name = "default"
new_tour = (
"""
// Shepherd Tour
var tour = new Shepherd.Tour({
defaults: {
classes: '%s',
scrollTo: true
}
});
var allButtons = {
skip: {
text: "Skip",
action: tour.cancel,
classes: 'shepherd-button-secondary tour-button-left'
},
back: {
text: "Back",
action: tour.back,
classes: 'shepherd-button-secondary'
},
next: {
text: "Next",
action: tour.next,
classes: 'shepherd-button-primary tour-button-right'
},
};
var firstStepButtons = [allButtons.skip, allButtons.next];
var midTourButtons = [allButtons.back, allButtons.next];
""" % shepherd_theme)
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour) | Creates a Shepherd JS website tour.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
theme - Sets the default theme for the tour.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("light" is used if None is selected.) |
def loc_info(text, index):
'''Location of `index` in source code `text`.'''
if index > len(text):
raise ValueError('Invalid index.')
line, last_ln = text.count('\n', 0, index), text.rfind('\n', 0, index)
col = index - (last_ln + 1)
return (line, col) | Location of `index` in source code `text`. |
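A quick illustration of loc_info on a short source string; both the line and column in the returned tuple are zero-based.

src = "first line\nsecond line"
# Index 13 points at the 'c' in "second" -> line 1, column 2.
print(loc_info(src, 13))   # (1, 2)
print(loc_info(src, 0))    # (0, 0)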
def enqueue(self, message, *, delay=None):
"""Enqueue a message.
Parameters:
message(Message): The message to enqueue.
delay(int): The minimum amount of time, in milliseconds, to
delay the message by. Must be less than 7 days.
Raises:
ValueError: If ``delay`` is longer than 7 days.
"""
queue_name = message.queue_name
# Each enqueued message must have a unique id in Redis so
# using the Message's id isn't safe because messages may be
# retried.
message = message.copy(options={
"redis_message_id": str(uuid4()),
})
if delay is not None:
queue_name = dq_name(queue_name)
message_eta = current_millis() + delay
message = message.copy(
queue_name=queue_name,
options={
"eta": message_eta,
},
)
self.logger.debug("Enqueueing message %r on queue %r.", message.message_id, queue_name)
self.emit_before("enqueue", message, delay)
self.do_enqueue(queue_name, message.options["redis_message_id"], message.encode())
self.emit_after("enqueue", message, delay)
return message | Enqueue a message.
Parameters:
message(Message): The message to enqueue.
delay(int): The minimum amount of time, in milliseconds, to
delay the message by. Must be less than 7 days.
Raises:
ValueError: If ``delay`` is longer than 7 days. |
def pgrrec(body, lon, lat, alt, re, f):
"""
Convert planetographic coordinates to rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pgrrec_c.html
:param body: Body with which coordinate system is associated.
:type body: str
:param lon: Planetographic longitude of a point (radians).
:type lon: float
:param lat: Planetographic latitude of a point (radians).
:type lat: float
:param alt: Altitude of a point above reference spheroid.
:type alt: float
:param re: Equatorial radius of the reference spheroid.
:type re: float
:param f: Flattening coefficient.
:type f: float
:return: Rectangular coordinates of the point.
:rtype: 3-Element Array of floats
"""
body = stypes.stringToCharP(body)
lon = ctypes.c_double(lon)
lat = ctypes.c_double(lat)
alt = ctypes.c_double(alt)
re = ctypes.c_double(re)
f = ctypes.c_double(f)
rectan = stypes.emptyDoubleVector(3)
libspice.pgrrec_c(body, lon, lat, alt, re, f, rectan)
return stypes.cVectorToPython(rectan) | Convert planetographic coordinates to rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pgrrec_c.html
:param body: Body with which coordinate system is associated.
:type body: str
:param lon: Planetographic longitude of a point (radians).
:type lon: float
:param lat: Planetographic latitude of a point (radians).
:type lat: float
:param alt: Altitude of a point above reference spheroid.
:type alt: float
:param re: Equatorial radius of the reference spheroid.
:type re: float
:param f: Flattening coefficient.
:type f: float
:return: Rectangular coordinates of the point.
:rtype: 3-Element Array of floats |
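A hedged usage sketch via SpiceyPy, whose pgrrec wrapper this function appears to match; the PCK kernel name is an assumption and must point to a real file providing the Mars radii used to build re and f.

import spiceypy as spice

spice.furnsh("pck00010.tpc")                 # assumed local PCK with BODY499_RADII
radii = spice.bodvrd("MARS", "RADII", 3)[1]  # equatorial, equatorial, polar radii (km)
re = radii[0]
f = (radii[0] - radii[2]) / radii[0]         # flattening coefficient
xyz = spice.pgrrec("mars", 90.0 * spice.rpd(), 45.0 * spice.rpd(), 300.0, re, f)
print(xyz)                                   # rectangular coordinates in km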
def from_collection_xml(cls, xml_content):
"""Build a :class:`~zenodio.harvest.Datacite3Collection` from
Datacite3-formatted XML.
Users should use :func:`zenodio.harvest.harvest_collection` to build a
:class:`~zenodio.harvest.Datacite3Collection` for a Community.
Parameters
----------
xml_content : str
Datacite3-formatted XML content.
Returns
-------
collection : :class:`Datacite3Collection`
The collection parsed from Zenodo OAI-PMH XML content.
"""
xml_dataset = xmltodict.parse(xml_content, process_namespaces=False)
# Unwrap the record list when harvesting a collection's datacite 3
xml_records = xml_dataset['OAI-PMH']['ListRecords']['record'] # NOQA
return cls(xml_records) | Build a :class:`~zenodio.harvest.Datacite3Collection` from
Datacite3-formatted XML.
Users should use :func:`zenodio.harvest.harvest_collection` to build a
:class:`~zenodio.harvest.Datacite3Collection` for a Community.
Parameters
----------
xml_content : str
Datacite3-formatted XML content.
Returns
-------
collection : :class:`Datacite3Collection`
The collection parsed from Zenodo OAI-PMH XML content. |
def get_foldrate(seq, secstruct):
"""Submit sequence and structural class to FOLD-RATE calculator (http://www.iitm.ac.in/bioinfo/fold-rate/)
to calculate kinetic folding rate.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
secstruct (str): Structural class: ``all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
Returns:
float: Kinetic folding rate k_f
"""
seq = ssbio.protein.sequence.utils.cast_to_str(seq)
url = 'http://www.iitm.ac.in/bioinfo/cgi-bin/fold-rate/foldrateCalculator.pl'
values = {'sequence': seq, 'eqn': secstruct}
data = urlencode(values)
data = data.encode('ASCII')
response = urlopen(url, data)
result = str(response.read())
ind = str.find(result, 'The folding rate,')
result2 = result[ind:ind + 70]
ind1 = str.find(result2, '=')
ind2 = str.find(result2, '/sec')
rate = result2[ind1 + 2:ind2]
return rate | Submit sequence and structural class to FOLD-RATE calculator (http://www.iitm.ac.in/bioinfo/fold-rate/)
to calculate kinetic folding rate.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
secstruct (str): Structural class: ``all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
Returns:
float: Kinetic folding rate k_f |
def widths_in_range_mm(
self,
minwidth=EMIR_MINIMUM_SLITLET_WIDTH_MM,
maxwidth=EMIR_MAXIMUM_SLITLET_WIDTH_MM
):
"""Return list of slitlets which width is within given range
Parameters
----------
minwidth : float
Minimum slit width (mm).
maxwidth : float
Maximum slit width (mm).
Returns
-------
list_ok : list
List of slitlet numbers (1 to EMIR_NBARS) whose width is
within the given range
"""
list_ok = []
for i in range(EMIR_NBARS):
slitlet_ok = minwidth <= self._csu_bar_slit_width[i] <= maxwidth
if slitlet_ok:
list_ok.append(i + 1)
return list_ok | Return list of slitlets whose width is within the given range
Parameters
----------
minwidth : float
Minimum slit width (mm).
maxwidth : float
Maximum slit width (mm).
Returns
-------
list_ok : list
List of slitlet numbers (1 to EMIR_NBARS) whose width is
within the given range
def calc_freefree_kappa(ne, t, hz):
"""Dulk (1985) eq 20, assuming pure hydrogen."""
return 9.78e-3 * ne**2 * hz**-2 * t**-1.5 * (24.5 + np.log(t) - np.log(hz)) | Dulk (1985) eq 20, assuming pure hydrogen. |
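A one-line spot check of the formula with round coronal-like numbers; the cgs-style conventions (electron density in cm^-3, temperature in K, frequency in Hz) are assumed from the Dulk (1985) reference, not confirmed here.

import numpy as np

def calc_freefree_kappa(ne, t, hz):
    """Dulk (1985) eq 20, assuming pure hydrogen."""
    return 9.78e-3 * ne**2 * hz**-2 * t**-1.5 * (24.5 + np.log(t) - np.log(hz))

# ne = 1e9 cm^-3, T = 1e6 K, nu = 1 GHz.
print(calc_freefree_kappa(1e9, 1e6, 1e9))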
def _get_button_attrs(self, tool):
"""
Get the HTML attributes associated with a tool.
There are some standard attributes (class and title) that the template
will always want. Any number of additional attributes can be specified
and passed on. This is kinda awkward and due for a refactor for
readability.
"""
attrs = getattr(tool, 'attrs', {})
# href is not allowed to be set. should an exception be raised instead?
if 'href' in attrs:
attrs.pop('href')
# title is not allowed to be set. should an exception be raised instead?
# `short_description` should be set instead to parallel django admin
# actions
if 'title' in attrs:
attrs.pop('title')
default_attrs = {
'class': attrs.get('class', ''),
'title': getattr(tool, 'short_description', ''),
}
standard_attrs = {}
custom_attrs = {}
for k, v in dict(default_attrs, **attrs).items():
if k in default_attrs:
standard_attrs[k] = v
else:
custom_attrs[k] = v
return standard_attrs, custom_attrs | Get the HTML attributes associated with a tool.
There are some standard attributes (class and title) that the template
will always want. Any number of additional attributes can be specified
and passed on. This is kinda awkward and due for a refactor for
readability. |
def check_ip(ip, log=False):
"""Attempts a connection to the TV and checks if there really is a TV."""
if log:
print('Checking ip: {}...'.format(ip))
request_timeout = 0.1
try:
tv_url = 'http://{}:6095/request?action=isalive'.format(ip)
request = requests.get(tv_url, timeout=request_timeout)
except requests.exceptions.ConnectTimeout:
return False
return request.status_code == 200 | Attempts a connection to the TV and checks if there really is a TV. |
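A small driver loop built on check_ip as defined above; the /24 subnet prefix is a placeholder and the helper name is illustrative.

# Assumes check_ip() from above is in scope.
def find_tvs(subnet="192.168.1"):
    found = []
    for host in range(1, 255):
        ip = "{}.{}".format(subnet, host)
        if check_ip(ip):
            found.append(ip)
    return found

print(find_tvs())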
def _reproject(wcs1, wcs2):
"""
Perform the forward transformation of ``wcs1`` followed by the
inverse transformation of ``wcs2``.
Parameters
----------
wcs1, wcs2 : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS`
The WCS objects.
Returns
-------
result : func
Function to compute the transformations. It takes x, y
positions in ``wcs1`` and returns x, y positions in
``wcs2``. The input and output x, y positions are zero
indexed.
"""
import gwcs
forward_origin = []
if isinstance(wcs1, fitswcs.WCS):
forward = wcs1.all_pix2world
forward_origin = [0]
elif isinstance(wcs1, gwcs.wcs.WCS):
forward = wcs1.forward_transform
else:
raise ValueError('wcs1 must be an astropy.wcs.WCS or '
'gwcs.wcs.WCS object.')
inverse_origin = []
if isinstance(wcs2, fitswcs.WCS):
inverse = wcs2.all_world2pix
inverse_origin = [0]
elif isinstance(wcs2, gwcs.wcs.WCS):
inverse = wcs2.forward_transform.inverse
else:
raise ValueError('wcs2 must be an astropy.wcs.WCS or '
'gwcs.wcs.WCS object.')
def _reproject_func(x, y):
forward_args = [x, y] + forward_origin
sky = forward(*forward_args)
inverse_args = sky + inverse_origin
return inverse(*inverse_args)
return _reproject_func | Perform the forward transformation of ``wcs1`` followed by the
inverse transformation of ``wcs2``.
Parameters
----------
wcs1, wcs2 : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS`
The WCS objects.
Returns
-------
result : func
Function to compute the transformations. It takes x, y
positions in ``wcs1`` and returns x, y positions in
``wcs2``. The input and output x, y positions are zero
indexed. |
def broadcast(self, msg):
"""
Broadcasts msg to Scratch. msg can be a single message or an iterable
(list, tuple, set, generator, etc.) of messages.
"""
if getattr(msg, '__iter__', False): # iterable
for m in msg:
self._send('broadcast "%s"' % self._escape(str(m)))
else: # probably a string or number
self._send('broadcast "%s"' % self._escape(str(msg))) | Broadcasts msg to Scratch. msg can be a single message or an iterable
(list, tuple, set, generator, etc.) of messages. |
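A hedged usage sketch assuming a scratchpy-style client object exposing this method; the module name, constructor, and host are assumptions. Note that the iterable check is keyed on __iter__, so under Python 2 a plain string is treated as a single message.

import scratch  # assumed module providing the Scratch client

s = scratch.Scratch(host="127.0.0.1")
s.broadcast(42)                        # single (non-iterable) value
s.broadcast(["level 1", "level 2"])    # iterable of messages, sent one by one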
def search_windows(
self, winname=None, winclass=None, winclassname=None,
pid=None, only_visible=False, screen=None, require=False,
searchmask=0, desktop=None, limit=0, max_depth=-1):
"""
Search for windows.
:param winname:
Regexp to be matched against window name
:param winclass:
Regexp to be matched against window class
:param winclassname:
Regexp to be matched against window class name
:param pid:
Only return windows from this PID
:param only_visible:
If True, only return visible windows
:param screen:
Search only windows on this screen
:param require:
If True, will match ALL conditions. Otherwise, windows matching
ANY condition will be returned.
:param searchmask:
Search mask, for advanced usage. Leave this alone if you
don't know what you are doing.
:param limit:
Maximum number of windows to list. Zero means no limit.
:param max_depth:
Maximum depth to return. Defaults to -1, meaning "no limit".
:return:
A list of window ids matching query.
"""
windowlist_ret = ctypes.pointer(window_t(0))
nwindows_ret = ctypes.c_uint(0)
search = xdo_search_t(searchmask=searchmask)
if winname is not None:
search.winname = winname
search.searchmask |= SEARCH_NAME
if winclass is not None:
search.winclass = winclass
search.searchmask |= SEARCH_CLASS
if winclassname is not None:
search.winclassname = winclassname
search.searchmask |= SEARCH_CLASSNAME
if pid is not None:
search.pid = pid
search.searchmask |= SEARCH_PID
if only_visible:
search.only_visible = True
search.searchmask |= SEARCH_ONLYVISIBLE
if screen is not None:
search.screen = screen
search.searchmask |= SEARCH_SCREEN
if desktop is not None:
search.desktop = desktop
search.searchmask |= SEARCH_DESKTOP
search.limit = limit
search.max_depth = max_depth
_libxdo.xdo_search_windows(
self._xdo, search,
ctypes.byref(windowlist_ret),
ctypes.byref(nwindows_ret))
return [windowlist_ret[i] for i in range(nwindows_ret.value)] | Search for windows.
:param winname:
Regexp to be matched against window name
:param winclass:
Regexp to be matched against window class
:param winclassname:
Regexp to be matched against window class name
:param pid:
Only return windows from this PID
:param only_visible:
If True, only return visible windows
:param screen:
Search only windows on this screen
:param require:
If True, will match ALL conditions. Otherwise, windows matching
ANY condition will be returned.
:param searchmask:
Search mask, for advanced usage. Leave this alone if you
don't know what you are doing.
:param limit:
Maximum number of windows to list. Zero means no limit.
:param max_depth:
Maximum depth to return. Defaults to -1, meaning "no limit".
:return:
A list of window ids matching query. |
def sparse_dot_product_attention(q, k, v, bi, use_map_fn, experts_params):
"""Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrices are only
computed each time on a subset of the tokens.
Notes:
* The function doesn't perform scaling here (multihead_attention does
the /sqrt(depth)).
* The padding should have been removed (so batch size should be 1 but length
contains the elements from all different batches)
* Right now, only self attention is supported so length_q and length_kv
should be identical and the function will add triangular mask.
* If bi.order is not None, the bias is added inside this function to
prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k]
k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k]
v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v]
bi (BatchInfo): Contains the batch coordinates and sequence order
use_map_fn (bool): Use either tf.map_fn or a python for loop to compute the
heads separately
experts_params (dict): Additional params for the local expert
Returns:
tf.Tensor: Approximation of Softmax(Q.K) * V, of shape
[batch, heads, length_q, depth_v]
"""
batch_size, nb_heads, _, depth = common_layers.shape_list(q)
@expert_utils.add_name_scope()
def flatten_first_dims(x):
"""Reshape such that x is [num_heads, -1, depth]."""
# Case 1: Either constant batch size of size 1 or batch already flattened
if x.get_shape().as_list()[0] == 1:
return tf.squeeze(x, axis=0)
# Case 2: Flatten batch dimension
x = tf.transpose(x, perm=[1, 0, 2, 3])
x = tf.reshape(x, [nb_heads, -1, depth])
return x
def flatten_batch(x):
if x is None:
return x
return expert_utils.flatten_all_but_last(x)
q = flatten_first_dims(q)
k = flatten_first_dims(k)
v = flatten_first_dims(v)
bi = BatchInfo(
coordinates=flatten_batch(bi.coordinates),
order=flatten_batch(bi.order),
)
# Unstack heads
list_q = tf.unstack(q) # list[tf.Tensor(shape=[batch * length, depth])]
list_k = tf.unstack(k)
list_v = tf.unstack(v)
list_gates_q = []
list_gates_k = []
total_loss = 0.0
# There might be a more optimized way to compute all heads at once
for single_q, single_k, _ in zip(list_q, list_k, list_v):
# Each head get its own dispatcher
lhs_gating = LshGating(
depth=single_q.get_shape().as_list()[-1], **experts_params)
list_gates_q.append(lhs_gating.get_gates(single_q))
list_gates_k.append(lhs_gating.get_gates(single_k))
gates_q = tf.stack(list_gates_q)
gates_k = tf.stack(list_gates_k)
# Process each head separately.
v_out = map_fn_switch(
lambda args: dot_product_single_head(bi=bi, *args),
elems=(q, k, v, gates_q, gates_k),
dtype=(tf.float32),
parallel_iterations=2,
use_map_fn=use_map_fn,
)
# Restore original shape as expected by multihead_attention
if isinstance(batch_size, int) and batch_size == 1:
v_out = tf.expand_dims(v_out, axis=0) # Restore batch_size = 1
else:
v_out = tf.reshape(v_out, [nb_heads, batch_size, -1, depth])
v_out = tf.transpose(v_out, [1, 0, 2, 3])
return v_out, total_loss / nb_heads | Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrices are only
computed each time on a subset of the tokens.
Notes:
* The function doesn't perform scaling here (multihead_attention does
the /sqrt(depth)).
* The padding should have been removed (so batch size should be 1 but length
contains the elements from all different batches)
* Right now, only self attention is supported so length_q and length_kv
should be identical and the function will add triangular mask.
* If bi.order is not None, the bias is added inside this function to
prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k]
k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k]
v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v]
bi (BatchInfo): Contains the batch coordinates and sequence order
use_map_fn (bool): Use either tf.map_fn or a python for loop to compute the
heads separately
experts_params (dict): Additional params for the local expert
Returns:
tf.Tensor: Approximation of Softmax(Q.K) * V, of shape
[batch, heads, length_q, depth_v] |
def _set_ccm_interval(self, v, load=False):
"""
Setter method for ccm_interval, mapped from YANG variable /cfm_state/cfm_detail/domain/ma/ccm_interval (ccm-intervals)
If this variable is read-only (config: false) in the
source YANG file, then _set_ccm_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ccm_interval() directly.
YANG Description: CCM Interval
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'three-dot-three-ms': {'value': 3}, u'one-min': {'value': 60000}, u'ten-sec': {'value': 10000}, u'hundred-ms': {'value': 100}, u'ten-min': {'value': 600000}, u'one-sec': {'value': 1000}, u'ten-ms': {'value': 10}},), is_leaf=True, yang_name="ccm-interval", rest_name="ccm-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='ccm-intervals', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ccm_interval must be of a type compatible with ccm-intervals""",
'defined-type': "brocade-dot1ag-operational:ccm-intervals",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'three-dot-three-ms': {'value': 3}, u'one-min': {'value': 60000}, u'ten-sec': {'value': 10000}, u'hundred-ms': {'value': 100}, u'ten-min': {'value': 600000}, u'one-sec': {'value': 1000}, u'ten-ms': {'value': 10}},), is_leaf=True, yang_name="ccm-interval", rest_name="ccm-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='ccm-intervals', is_config=False)""",
})
self.__ccm_interval = t
if hasattr(self, '_set'):
self._set() | Setter method for ccm_interval, mapped from YANG variable /cfm_state/cfm_detail/domain/ma/ccm_interval (ccm-intervals)
If this variable is read-only (config: false) in the
source YANG file, then _set_ccm_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ccm_interval() directly.
YANG Description: CCM Interval |
def _get_dataset_showcase_dict(self, showcase):
# type: (Union[hdx.data.showcase.Showcase, Dict,str]) -> Dict
"""Get dataset showcase dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
dict: dataset showcase dict
"""
if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):
if 'id' not in showcase:
showcase = hdx.data.showcase.Showcase.read_from_hdx(showcase['name'])
showcase = showcase['id']
elif not isinstance(showcase, str):
raise HDXError('Type %s cannot be added as a showcase!' % type(showcase).__name__)
if is_valid_uuid(showcase) is False:
raise HDXError('%s is not a valid showcase id!' % showcase)
return {'package_id': self.data['id'], 'showcase_id': showcase} | Get dataset showcase dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
dict: dataset showcase dict |
def load(self, buf = None):
"""This method opens an existing database.
self.password/self.keyfile and self.filepath must be set.
"""
if self.password is None and self.keyfile is None:
raise KPError('Need a password or keyfile')
elif self.filepath is None and buf is None:
raise KPError('Can only load an existing database!')
if buf is None:
buf = self.read_buf()
# The header is 124 bytes long, the rest is content
header = buf[:124]
crypted_content = buf[124:]
del buf
# The header holds two signatures
if not (struct.unpack('<I', header[:4])[0] == 0x9AA2D903 and
struct.unpack('<I', header[4:8])[0] == 0xB54BFB65):
del crypted_content
del header
raise KPError('Wrong signatures!')
# Unpack the header
self._enc_flag = struct.unpack('<I', header[8:12])[0]
self._version = struct.unpack('<I', header[12:16])[0]
self._final_randomseed = struct.unpack('<16s', header[16:32])[0]
self._enc_iv = struct.unpack('<16s', header[32:48])[0]
self._num_groups = struct.unpack('<I', header[48:52])[0]
self._num_entries = struct.unpack('<I', header[52:56])[0]
self._contents_hash = struct.unpack('<32s', header[56:88])[0]
self._transf_randomseed = struct.unpack('<32s', header[88:120])[0]
self._key_transf_rounds = struct.unpack('<I', header[120:124])[0]
del header
# Check if the database is supported
if self._version & 0xFFFFFF00 != 0x00030002 & 0xFFFFFF00:
del crypted_content
raise KPError('Unsupported file version!')
#Actually, only AES is supported.
elif not self._enc_flag & 2:
del crypted_content
raise KPError('Unsupported file encryption!')
if self.password is None:
masterkey = self._get_filekey()
elif self.password is not None and self.keyfile is not None:
passwordkey = self._get_passwordkey()
filekey = self._get_filekey()
sha = SHA256.new()
sha.update(passwordkey+filekey)
masterkey = sha.digest()
else:
masterkey = self._get_passwordkey()
# Create the key that is needed to...
final_key = self._transform_key(masterkey)
# ...decrypt the content
decrypted_content = self._cbc_decrypt(final_key, crypted_content)
# Check if decryption failed
if ((len(decrypted_content) > 2147483446) or
(len(decrypted_content) == 0 and self._num_groups > 0)):
del decrypted_content
del crypted_content
raise KPError("Decryption failed!\nThe key is wrong or the file is"
" damaged.")
sha_obj = SHA256.new()
sha_obj.update(decrypted_content)
if not self._contents_hash == sha_obj.digest():
del masterkey
del final_key
raise KPError("Hash test failed.\nThe key is wrong or the file is "
"damaged.")
del masterkey
del final_key
# Read out the groups
pos = 0
levels = []
cur_group = 0
group = v1Group()
while cur_group < self._num_groups:
# Every group is made up of single fields
field_type = struct.unpack('<H', decrypted_content[:2])[0]
decrypted_content = decrypted_content[2:]
pos += 2
# Check if offset is alright
if pos >= len(crypted_content)+124:
del decrypted_content
del crypted_content
raise KPError('Unexpected error: Offset is out of range.[G1]')
field_size = struct.unpack('<I', decrypted_content[:4])[0]
decrypted_content = decrypted_content[4:]
pos += 4
if pos >= len(crypted_content)+124:
del decrypted_content
del crypted_content
raise KPError('Unexpected error: Offset is out of range.[G2]')
# Finally read out the content
b_ret = self._read_group_field(group, levels, field_type,
field_size, decrypted_content)
# If the end of a group is reached append it to the groups array
if field_type == 0xFFFF and b_ret == True:
group.db = self
self.groups.append(group)
group = v1Group()
cur_group += 1
decrypted_content = decrypted_content[field_size:]
pos += field_size
if pos >= len(crypted_content)+124:
del decrypted_content
del crypted_content
raise KPError('Unexpected error: Offset is out of range.[G1]')
# Now the same with the entries
cur_entry = 0
entry = v1Entry()
while cur_entry < self._num_entries:
field_type = struct.unpack('<H', decrypted_content[:2])[0]
decrypted_content = decrypted_content[2:]
pos += 2
if pos >= len(crypted_content)+124:
del decrypted_content
del crypted_content
raise KPError('Unexpected error: Offset is out of range.[G1]')
field_size = struct.unpack('<I', decrypted_content[:4])[0]
decrypted_content = decrypted_content[4:]
pos += 4
if pos >= len(crypted_content)+124:
del decrypted_content
del crypted_content
raise KPError('Unexpected error: Offset is out of range.[G2]')
b_ret = self._read_entry_field(entry, field_type, field_size,
decrypted_content)
if field_type == 0xFFFF and b_ret == True:
self.entries.append(entry)
if entry.group_id is None:
del decrypted_content
del crypted_content
raise KPError("Found entry without group!")
entry = v1Entry()
cur_entry += 1
decrypted_content = decrypted_content[field_size:]
pos += field_size
if pos >= len(crypted_content)+124:
del decrypted_content
del crypted_content
raise KPError('Unexpected error: Offset is out of range.[G1]')
if self._create_group_tree(levels) is False:
del decrypted_content
del crypted_content
return False
del decrypted_content
del crypted_content
if self.filepath is not None:
with open(self.filepath+'.lock', 'w') as handler:
handler.write('')
return True | This method opens an existing database.
self.password/self.keyfile and self.filepath must be set. |
def create_form_data(self, **kwargs):
"""Create groupings of form elements."""
# Get the specified keyword arguments.
children = kwargs.get('children', [])
sort_order = kwargs.get('sort_order', None)
solr_response = kwargs.get('solr_response', None)
superuser = kwargs.get('superuser', False)
# Get the vocabularies to pull the qualifiers from.
vocabularies = self.get_vocabularies()
# Loop through all UNTL elements in the Python object.
for element in children:
# Add children that are missing from the form.
element.children = add_missing_children(
element.contained_children,
element.children,
)
# Add the form attribute to the element.
element.add_form(
vocabularies=vocabularies,
qualifier=element.qualifier,
content=element.content,
superuser=superuser,
)
# Element can contain children.
if element.form.has_children:
# If the parent has a qualifier,
# create a representative form element for the parent.
if getattr(element.form, 'qualifier_name', False):
add_parent = PARENT_FORM[element.form.qualifier_name](
content=element.qualifier,
)
# Add the parent to the list of child elements.
element.children.append(add_parent)
# Sort the elements by the index of child sort.
element.children.sort(
key=lambda obj: element.form.child_sort.index(obj.tag)
)
# Loop through the element's children (if it has any).
for child in element.children:
# Add the form attribute to the element.
child.add_form(
vocabularies=vocabularies,
qualifier=child.qualifier,
content=child.content,
parent_tag=element.tag,
superuser=superuser,
)
element_group_dict = {}
# Group related objects together.
for element in children:
# Make meta-hidden its own group.
if element.form.name == 'meta' and element.qualifier == 'hidden':
element_group_dict['hidden'] = [element]
# Element is not meta-hidden.
else:
# Make sure the dictionary key exists.
if element.form.name not in element_group_dict:
element_group_dict[element.form.name] = []
element_group_dict[element.form.name].append(element)
# If the hidden meta element doesn't exist, add it to its own group.
if 'hidden' not in element_group_dict:
hidden_element = PYUNTL_DISPATCH['meta'](
qualifier='hidden',
content='False')
hidden_element.add_form(
vocabularies=vocabularies,
qualifier=hidden_element.qualifier,
content=hidden_element.content,
superuser=superuser,
)
element_group_dict['hidden'] = [hidden_element]
# Create a list of group object elements.
element_list = self.create_form_groupings(
vocabularies,
solr_response,
element_group_dict,
sort_order,
)
# Return the list of UNTL elements with form data added.
return element_list | Create groupings of form elements. |
def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules,
loadBalancerClassOfServiceID=1, *args, **kwargs):
"""
:type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if notificationContacts is false
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
"""
response = self._call(method=SetEnqueueLoadBalancerCreation,
healthCheckNotification=healthCheckNotification,
instance=instance,
ipAddressResourceId=ipAddressResourceId,
name=name,
notificationContacts=notificationContacts,
rules=rules,
loadBalancerClassOfServiceID=loadBalancerClassOfServiceID,
*args, **kwargs) | :type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if notificationContacts is false
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service |
def fromXml(xml):
"""
Creates a new slide from XML.
:return <XWalkthroughSlide>
"""
slide = XWalkthroughSlide(**xml.attrib)
# create the items
for xgraphic in xml:
slide.addItem(XWalkthroughItem.fromXml(xgraphic))
return slide | Creates a new slide from XML.
:return <XWalkthroughSlide> |
def ndwi(self):
"""
Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
For Landsat8 and sentinel2 calculated by using Green and NIR bands.
Returns: numpy array of ndwi values
"""
data = self._read(self[self._ndwi_bands,...]).astype(np.float32)
return (data[1,:,:] - data[0,:,:]) / (data[0,:,:] + data[1,:,:]) | Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
For Landsat8 and sentinel2 calculated by using Green and NIR bands.
Returns: numpy array of ndwi values |
def sha1_hexdigest(self):
# type: () -> str
"""
A SHA-1 digest of the whole object for easy differentiation.
.. versionadded:: 18.1.0
"""
if self._sha1_hexdigest is None:
self._sha1_hexdigest = hashlib.sha1(self._pem_bytes).hexdigest()
return self._sha1_hexdigest | A SHA-1 digest of the whole object for easy differentiation.
.. versionadded:: 18.1.0 |
def get_abi_size(self, target_data, context=None):
"""
Get the ABI size of this type according to data layout *target_data*.
"""
llty = self._get_ll_pointer_type(target_data, context)
return target_data.get_pointee_abi_size(llty) | Get the ABI size of this type according to data layout *target_data*. |
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard 2d convolutions."""
return conv_block_internal(conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs) | A block of standard 2d convolutions. |
def context_spec(self):
"""Spec for specifying context options"""
from harpoon.option_spec import image_objs
return dict_from_bool_spec(lambda meta, val: {"enabled": val}
, create_spec(image_objs.Context
, validators.deprecated_key("use_git_timestamps", "Since docker 1.8, timestamps no longer invalidate the docker layer cache")
, include = listof(string_spec())
, exclude = listof(string_spec())
, enabled = defaulted(boolean(), True)
, find_options = string_spec()
, parent_dir = directory_spec(formatted(defaulted(string_spec(), "{config_root}"), formatter=MergedOptionStringFormatter))
, use_gitignore = defaulted(boolean(), False)
, ignore_find_errors = defaulted(boolean(), False)
)
) | Spec for specifying context options |
def merge_results(x, y):
"""
Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + y[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict`
"""
z = x.copy()
for key, value in y.items():
if isinstance(value, list) and isinstance(z.get(key), list):
z[key] += value
else:
z[key] = value
return z | Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + y[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict` |
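A short example of the list-concatenation behaviour described above, assuming merge_results from above is in scope.

x = {"hosts": ["a"], "count": 1}
y = {"hosts": ["b"], "count": 2}
z = merge_results(x, y)
print(z)   # {'hosts': ['a', 'b'], 'count': 2}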
def _get_variables(self) -> Dict[str, str]:
"""
Gets the variables that should be set for this project.
:return: the variables
"""
variables = {} # type: Dict[str, str]
for group in self.groups:
setting_variables = self._read_group_variables(group)
variables.update(setting_variables)
return variables | Gets the variables that should be set for this project.
:return: the variables |
def _get_option(config, supplement, section, option, fallback=None):
"""
Reads an option for a configuration file.
:param configparser.ConfigParser config: The main config file.
:param configparser.ConfigParser supplement: The supplement config file.
:param str section: The name of the section of the option.
:param str option: The name of the option.
:param str|None fallback: The fallback value of the option if it is not set in either configuration files.
:rtype: str
:raise KeyError:
"""
if supplement:
return_value = supplement.get(section, option, fallback=config.get(section, option, fallback=fallback))
else:
return_value = config.get(section, option, fallback=fallback)
if fallback is None and return_value is None:
raise KeyError("Option '{0!s}' is not found in section '{1!s}'.".format(option, section))
return return_value | Reads an option for a configuration file.
:param configparser.ConfigParser config: The main config file.
:param configparser.ConfigParser supplement: The supplement config file.
:param str section: The name of the section of the option.
:param str option: The name of the option.
:param str|None fallback: The fallback value of the option if it is not set in either configuration files.
:rtype: str
:raise KeyError: |
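A minimal sketch of the supplement-overrides-main lookup behaviour, using two in-memory ConfigParser objects; the section and option names are illustrative and the function above is assumed to be in scope.

from configparser import ConfigParser

main = ConfigParser()
main.read_string("[database]\nhost = main-db\nport = 5432\n")

supplement = ConfigParser()
supplement.read_string("[database]\nhost = replica-db\n")

# The supplement wins when both define the option; main is the fallback.
print(_get_option(main, supplement, "database", "host"))             # replica-db
print(_get_option(main, supplement, "database", "port"))             # 5432
print(_get_option(main, None, "database", "user", fallback="app"))   # app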
def runInParallel(*fns):
"""
Runs multiple processes in parallel.
:type: fns: def
"""
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join() | Runs multiple processes in parallel.
:type: fns: def |
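A self-contained usage sketch; the helper is restated compactly here so the snippet runs on its own, with Process coming from multiprocessing as the original assumes.

from multiprocessing import Process
import time

def runInParallel(*fns):
    procs = [Process(target=fn) for fn in fns]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

def task_a():
    time.sleep(1)
    print("a done")

def task_b():
    time.sleep(1)
    print("b done")

if __name__ == "__main__":
    runInParallel(task_a, task_b)   # both finish after ~1 s instead of ~2 s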
def age_to_BP(age, age_unit):
"""
Convert an age value into the equivalent in time Before Present (BP), where Present is 1950
Returns
---------
ageBP : number
"""
ageBP = -1e9
if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)":
if age < 0:
age = age+1 # to correct for there being no 0 AD
ageBP = 1950-age
elif age_unit == "Years BP" or age_unit == "Years Cal BP":
ageBP = age
elif age_unit == "ka":
ageBP = age*1000
elif age_unit == "Ma":
ageBP = age*1e6
elif age_unit == "Ga":
ageBP = age*1e9
else:
print("Age unit invalid. Age set to -1.0e9")
return ageBP | Convert an age value into the equivalent in time Before Present (BP), where Present is 1950
Returns
---------
ageBP : number |
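A few spot checks of the unit conversions (years BP relative to 1950, including the AD correction for the missing year 0), assuming age_to_BP from above is in scope.

print(age_to_BP(1950, "Years AD (+/-)"))   # 0
print(age_to_BP(-50, "Years AD (+/-)"))    # 1999 (no year 0, so -50 AD becomes -49)
print(age_to_BP(12.5, "ka"))               # 12500.0
print(age_to_BP(1.2, "Ma"))                # 1200000.0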
def trace_plot(precisions, path, n_edges=20, ground_truth=None, edges=[]):
"""Plot the change in precision (or covariance) coefficients as a function
of changing lambda and l1-norm. Always ignores diagonals.
Parameters
-----------
precisions : array of len(path) 2D ndarray, shape (n_features, n_features)
This is either precision_ or covariance_ from an InverseCovariance
estimator in path mode, or a list of results for individual runs of
the GraphLasso.
path : array of floats (descending)
This is path of lambdas explored.
n_edges : int (default=20)
Max number of edges to plot for each precision matrix along the path.
Only plots the maximum magnitude values (evaluating the last precision
matrix).
ground_truth : 2D ndarray, shape (n_features, n_features) (default=None)
If not None, plot the top n_edges/2 false positive and top n_edges/2
false negative indices when compared to ground_truth.
edges : list (default=[])
If not empty, use edges to determine which indices of each precision
matrix to track. Should be arranged to index precisions[0].flat.
If non-empty, n_edges and ground_truth will be ignored.
"""
_check_path(path)
assert len(path) == len(precisions)
assert len(precisions) > 0
path = np.array(path)
dim, _ = precisions[0].shape
# determine which indices to track
if not edges:
base_precision = np.copy(precisions[-1])
base_precision[np.triu_indices(base_precision.shape[0])] = 0
if ground_truth is None:
# top n_edges strongest coefficients
edges = np.argsort(np.abs(base_precision.flat))[::-1][:n_edges]
else:
# top n_edges/2 false positives and negatives compared to truth
assert ground_truth.shape == precisions[0].shape
masked_gt = np.copy(ground_truth)
masked_gt[np.triu_indices(ground_truth.shape[0])] = 0
intersection = np.intersect1d(
np.nonzero(base_precision.flat)[0], np.nonzero(masked_gt.flat)[0]
)
# false positives
fp_precision = np.copy(base_precision)
fp_precision.flat[intersection] = 0
fp_edges = np.argsort(np.abs(fp_precision.flat))[::-1][: n_edges // 2]
# false negatives
fn_precision = np.copy(masked_gt)
fn_precision.flat[intersection] = 0
fn_edges = np.argsort(np.abs(fn_precision.flat))[::-1][: n_edges // 2]
edges = list(fp_edges) + list(fn_edges)
assert len(edges) < len(precisions[0].flat)
assert np.max(edges) < len(precisions[0].flat)
assert np.min(edges) >= 0
# reshape data a bit:
# flatten each matrix into a column (so that coeffs are examples)
# compute l1-norm of each column
l1_norms = []
coeffs = np.zeros((dim ** 2, len(precisions)))
for ridx, result in enumerate(precisions):
coeffs[edges, ridx] = result.flat[edges]
l1_norms.append(np.linalg.norm(coeffs[:, ridx]))
# remove any zero rows
coeffs = coeffs[np.linalg.norm(coeffs, axis=1) > 1e-10, :]
plt.figure()
# show coefficients as a function of lambda
plt.subplot(1, 2, 1)
for result in precisions:
plt.plot(l1_norms, coeffs.T, lw=1)
plt.xlim([np.min(l1_norms), np.max(l1_norms)])
plt.ylabel("Coefficients")
plt.xlabel("l1 Norm")
# show coefficients as a function of lambda
log_path = np.log(path)
plt.subplot(1, 2, 2)
for result in precisions:
plt.plot(log_path, coeffs.T, lw=1)
plt.xlim([np.min(log_path), np.max(log_path)])
plt.ylabel("Coefficients")
plt.xlabel("log-Lambda")
plt.show()
r_input("Press any key to continue.") | Plot the change in precision (or covariance) coefficients as a function
of changing lambda and l1-norm. Always ignores diagonals.
Parameters
-----------
precisions : array of len(path) 2D ndarray, shape (n_features, n_features)
This is either precision_ or covariance_ from an InverseCovariance
estimator in path mode, or a list of results for individual runs of
the GraphLasso.
path : array of floats (descending)
This is path of lambdas explored.
n_edges : int (default=20)
Max number of edges to plot for each precision matrix along the path.
Only plots the maximum magnitude values (evaluating the last precision
matrix).
ground_truth : 2D ndarray, shape (n_features, n_features) (default=None)
If not None, plot the top n_edges/2 false positive and top n_edges/2
false negative indices when compared to ground_truth.
edges : list (default=[])
If not empty, use edges to determine which indices of each precision
matrix to track. Should be arranged to index precisions[0].flat.
If non-empty, n_edges and ground_truth will be ignored. |
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler | Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler. |
def hub_virtual_network_connections(self):
"""Instance depends on the API version:
* 2018-04-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.HubVirtualNetworkConnectionsOperations>`
"""
api_version = self._get_api_version('hub_virtual_network_connections')
if api_version == '2018-04-01':
from .v2018_04_01.operations import HubVirtualNetworkConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-04-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.HubVirtualNetworkConnectionsOperations>` |
def load_yaml(yaml_file: str) -> Any:
"""
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
"""
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) | Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list |
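A short usage sketch; the file name is a placeholder, and the point of the round-trip loader used above is that comments and key order survive a later dump.

data = load_yaml("settings.yaml")   # placeholder path; file must exist
print(type(data))                   # ruamel CommentedMap / CommentedSeq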
def post(self, command, data=None):
"""Post data to API."""
now = calendar.timegm(datetime.datetime.now().timetuple())
if now > self.expiration:
auth = self.__open("/oauth/token", data=self.oauth)
self.__sethead(auth['access_token'])
return self.__open("%s%s" % (self.api, command),
headers=self.head, data=data) | Post data to API. |
def from_domain(cls, domain, version=None, require_https=True):
"""
Try to find a hive for the given domain; raise an error if we have to
failover to HTTP and haven't explicitly suppressed it in the call.
"""
url = 'https://' + domain + '/api/hive.json'
try:
return cls.from_url(url, version=version, require_https=require_https)
except MissingHive:
url = 'http://' + domain + '/api/hive.json'
return cls.from_url(url, version=version, require_https=require_https) | Try to find a hive for the given domain; raise an error if we have to
failover to HTTP and haven't explicitly suppressed it in the call. |
def search_feature_sets(self, dataset_id):
"""
Returns an iterator over the FeatureSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.FeatureSet`
objects defined by the query parameters.
"""
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "featuresets", protocol.SearchFeatureSetsResponse) | Returns an iterator over the FeatureSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.FeatureSet`
objects defined by the query parameters. |
def check_files(cls, dap):
'''Check that there are only those files the standard accepts.
Return list of DapProblems.'''
problems = list()
dirname = os.path.dirname(dap._meta_location)
if dirname:
dirname += '/'
files = [f for f in dap.files if f.startswith(dirname)]
if len(files) == 1:
msg = 'Only meta.yaml in dap'
problems.append(DapProblem(msg, level=logging.WARNING))
return problems
files.remove(dirname + 'meta.yaml')
# Report and remove empty directories until no more are found
emptydirs = dap._get_emptydirs(files)
while emptydirs:
for ed in emptydirs:
msg = ed + ' is empty directory (may be nested)'
problems.append(DapProblem(msg, logging.WARNING))
files.remove(ed)
emptydirs = dap._get_emptydirs(files)
if dap.meta['package_name']:
name = dap.meta['package_name']
dirs = re.compile('^' + dirname + '((assistants(/(crt|twk|prep|extra))?|snippets)(/' +
name + ')?|icons(/(crt|twk|prep|extra|snippets)(/' + name +
')?)?|files|(files/(crt|twk|prep|extra|snippets)|doc)(/' + name +
'(/.+)?)?)$')
regs = re.compile('^' + dirname + '((assistants(/(crt|twk|prep|extra))|snippets)/' +
name + r'(/[^/]+)?\.yaml|icons/(crt|twk|prep|extra|snippets)/' +
name + r'(/[^/]+)?\.(' + Dap._icons_ext +
')|(files/(crt|twk|prep|extra|snippets)|doc)/' + name + '/.+)$')
to_remove = []
for f in files:
if dap._is_dir(f) and not dirs.match(f):
msg = f + '/ is not allowed directory'
problems.append(DapProblem(msg))
to_remove.append(f)
elif not dap._is_dir(f) and not regs.match(f):
msg = f + ' is not allowed file'
problems.append(DapProblem(msg))
to_remove.append(f)
for r in to_remove:
files.remove(r)
# Subdir yamls need a chief
for directory in ['assistants/' + t for t in 'crt twk prep extra'.split()] + \
['snippets']:
prefix = dirname + directory + '/'
for f in files:
if f.startswith(prefix) and dap._is_dir(f) and f + '.yaml' not in files:
msg = f + '/ present, but ' + f + '.yaml missing'
problems.append(DapProblem(msg))
# Missing assistants and/or snippets
if not dap.assistants_and_snippets:
msg = 'No Assistants or Snippets found'
problems.append(DapProblem(msg, level=logging.WARNING))
# Icons
icons = [dap._strip_leading_dirname(i) for i in dap.icons(strip_ext=True)] # we need to report duplicates
assistants = set([dap._strip_leading_dirname(a) for a in dap.assistants]) # duplicates are fine here
duplicates = set([i for i in icons if icons.count(i) > 1])
for d in duplicates:
msg = 'Duplicate icon for ' + d
problems.append(DapProblem(msg, level=logging.WARNING))
icons = set(icons)
for i in icons - assistants:
msg = 'Useless icon for non-existing assistant ' + i
problems.append(DapProblem(msg, level=logging.WARNING))
for a in assistants - icons:
msg = 'Missing icon for assistant ' + a
problems.append(DapProblem(msg, level=logging.WARNING))
# Source files
for f in cls._get_files_without_assistants(dap, dirname, files):
msg = 'Useless files for non-existing assistant ' + f
problems.append(DapProblem(msg, level=logging.WARNING))
return problems | Check that there are only those files the standard accepts.
Return list of DapProblems. |
def _boxFromData(self, messageData):
"""
A box.
@param messageData: a serialized AMP box representing either a message
or an error.
@type messageData: L{str}
@raise MalformedMessage: if the C{messageData} parameter does not parse
to exactly one AMP box.
"""
inputBoxes = parseString(messageData)
if not len(inputBoxes) == 1:
raise MalformedMessage()
[inputBox] = inputBoxes
return inputBox | A box.
@param messageData: a serialized AMP box representing either a message
or an error.
@type messageData: L{str}
@raise MalformedMessage: if the C{messageData} parameter does not parse
to exactly one AMP box. |
def show(i):
"""
Input: {
(data_uoa) - repo UOA
(reset) - if 'yes', reset repos
(stable) - take stable version (highly experimental)
(version) - checkout version (default - stable)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
o=i.get('out','')
curdir=os.getcwd()
duoa=i.get('data_uoa','')
reset=i.get('reset','')
stable=i.get('stable','')
version=i.get('version','')
if stable=='yes': version='stable'
r=ck.list_data({'module_uoa':work['self_module_uoa'],
'data_uoa':duoa})
if r['return']>0: return r
if o=='con':
ck.out('Please wait - it may take some time ...')
ck.out('')
r=ck.reload_repo_cache({}) # Ignore errors
# Init header.
pp=[({'branch':'branch', 'origin':'origin', 'checkout':'local', 'path':'path', 'type':'type', 'url':'url', 'data_uoa':'data_uoa'})]
il=0
for q in ck.cache_repo_info:
# Get repo info
qq=ck.cache_repo_info[q]
d=qq['dict']
t=d.get('shared','')
if t!='':
duoa=qq['data_uoa']
if len(duoa)>il: il=len(duoa)
p=d.get('path','')
url=d.get('url','')
branch=''
origin=''
checkout=''
if os.path.isdir(p):
# Detect status
os.chdir(p)
if reset=='yes':
r=ck.run_and_get_stdout({'cmd':['git','checkout','master']})
if version!='':
cx=qq.get('dict',{}).get('checkouts',{}).get(version,{})
branch=cx.get('branch','')
checkout=cx.get('checkout','')
if branch!='':
r=ck.run_and_get_stdout({'cmd':['git','checkout',branch]})
if checkout!='':
r=ck.run_and_get_stdout({'cmd':['git','checkout',checkout]})
# FGG TBD: we may need to add explicit check for branch/checkout in repo_deps here?
# OR MAYBE NOT - need to think ...
# Get current branch
r=ck.run_and_get_stdout({'cmd':['git','rev-parse','--abbrev-ref','HEAD']})
if r['return']==0 and r['return_code']==0:
branch=r['stdout'].strip()
# Get origin hash
r=ck.run_and_get_stdout({'cmd':['git','rev-parse','--short','origin/HEAD']})
if r['return']==0 and r['return_code']==0:
origin=r['stdout'].strip()
# Get current hash (append '-dirty' on dirty working tree)
r=ck.run_and_get_stdout({'cmd':['git','describe','--match=NeVeRmAtCh','--always','--abbrev','--dirty']})
if r['return']==0 and r['return_code']==0:
checkout=r['stdout'].strip()
pp.append({'branch':branch, 'origin':origin, 'checkout':checkout, 'path':p, 'type':t, 'url':url, 'data_uoa':duoa})
# Print
for q in pp:
name=q['data_uoa']
x=name+' '*(il-len(name))
branch=q.get('branch','')
origin=q.get('origin','')
checkout=q.get('checkout','')
url=q.get('url','')
if branch!='' or origin!='' or checkout!='' or url!='':
x+=' ( '+branch+' ; '+origin+' ; '+checkout+' ; '+url+' )'
ck.out(x)
os.chdir(curdir)
return {'return':0} | Input: {
(data_uoa) - repo UOA
(reset) - if 'yes', reset repos
(stable) - take stable version (highly experimental)
(version) - checkout version (default - stable)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
} |
def organization_memberships(self, user):
"""
Retrieve the organization memberships for this user.
:param user: User object or id
"""
return self._query_zendesk(self.endpoint.organization_memberships, 'organization_membership', id=user) | Retrieve the organization memberships for this user.
:param user: User object or id |
def image_export(self, image_name, dest_url, remote_host=None):
"""Export the image to the specified location
:param image_name: image name that can uniquely identify an image
:param dest_url: the location of exported image, eg.
file:///opt/images/export.img, now only support export to remote server
or local server's file system
:param remote_host: the server that the image will be export to, if
remote_host is None, the image will be stored in the dest_path in
local server, the format is username@IP eg. [email protected]
:returns a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
}
"""
try:
return self._imageops.image_export(image_name, dest_url,
remote_host)
except exception.SDKBaseException:
LOG.error("Failed to export image '%s'" % image_name)
raise | Export the image to the specified location
:param image_name: image name that can uniquely identify an image
:param dest_url: the location of exported image, eg.
file:///opt/images/export.img, now only support export to remote server
or local server's file system
:param remote_host: the server that the image will be export to, if
remote_host is None, the image will be stored in the dest_path in
local server, the format is username@IP eg. [email protected]
:returns a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
} |