def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
:returns: an ``xml.etree.ElementTree.Element`` object
"""
root_node = ET.Element("ozone")
reference_time_node = ET.SubElement(root_node, "reference_time")
reference_time_node.text = str(self._reference_time)
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = str(self._interval)
value_node = ET.SubElement(root_node, "value")
value_node.text = str(self.du_value)
root_node.append(self._location._to_DOM())
return root_node
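A minimal sketch of how such a DOM could be serialized once built; the ozone fields below are made-up stand-ins, and only the standard-library ``xml.etree.ElementTree`` calls are assumed.
import xml.etree.ElementTree as ET

# Build the same kind of element layout as _to_DOM, with hypothetical values
root = ET.Element("ozone")
ET.SubElement(root, "reference_time").text = "1601299200"
ET.SubElement(root, "value").text = "337.5"

# ET.tostring turns the traversable DOM into bytes; decode for a str
print(ET.tostring(root).decode("utf-8"))
# <ozone><reference_time>1601299200</reference_time><value>337.5</value></ozone>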
def add_ovsbridge_linuxbridge(name, bridge):
''' Add linux bridge to the named openvswitch bridge
:param name: Name of ovs bridge to be added to Linux bridge
:param bridge: Name of Linux bridge to be added to ovs bridge
:returns: None. Logs and returns early if the Linux bridge is already
in use by an OVS bridge or if the veth interfaces already exist.'''
try:
import netifaces
except ImportError:
if six.PY2:
apt_install('python-netifaces', fatal=True)
else:
apt_install('python3-netifaces', fatal=True)
import netifaces
# NOTE(jamespage):
# Older code supported addition of a linuxbridge directly
# to an OVS bridge; ensure we don't break uses on upgrade
existing_ovs_bridge = port_to_br(bridge)
if existing_ovs_bridge is not None:
log('Linuxbridge {} is already directly in use'
' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
level=INFO)
return
# NOTE(jamespage):
# preserve existing naming because interfaces may already exist.
ovsbridge_port = "veth-" + name
linuxbridge_port = "veth-" + bridge
if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or
len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN):
# NOTE(jamespage):
# use parts of hashed bridgename (openstack style) when
# a bridge name exceeds 15 chars
hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest()
base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:])
ovsbridge_port = "cvo{}".format(base)
linuxbridge_port = "cvb{}".format(base)
interfaces = netifaces.interfaces()
for interface in interfaces:
if interface == ovsbridge_port or interface == linuxbridge_port:
log('Interface {} already exists'.format(interface), level=INFO)
return
log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
level=INFO)
check_for_eni_source()
with open('/etc/network/interfaces.d/{}.cfg'.format(
linuxbridge_port), 'w') as config:
config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
ovsbridge_port=ovsbridge_port,
bridge=bridge))
subprocess.check_call(["ifup", linuxbridge_port])
add_bridge_port(name, linuxbridge_port)
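The hashed veth naming fallback above can be illustrated in isolation; the 15-character limit mirrors the kernel interface-name constraint, and the bridge name here is invented.
import hashlib

MAX_KERNEL_INTERFACE_NAME_LEN = 15  # assumption: matches the module constant

bridge = "br-a-very-long-linuxbridge-name"  # hypothetical bridge name
hashed = hashlib.sha256(bridge.encode("UTF-8")).hexdigest()
base = "{}-{}".format(hashed[:8], hashed[-2:])
ovsbridge_port = "cvo{}".format(base)    # 3 + 11 = 14 characters
linuxbridge_port = "cvb{}".format(base)

# Both derived names stay within the kernel limit
assert len(ovsbridge_port) <= MAX_KERNEL_INTERFACE_NAME_LEN
assert len(linuxbridge_port) <= MAX_KERNEL_INTERFACE_NAME_LEN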
def get_affinity_group_properties(self, affinity_group_name):
'''
Returns the system properties associated with the specified affinity
group.
affinity_group_name:
The name of the affinity group.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
return self._perform_get(
'/' + self.subscription_id + '/affinitygroups/' +
_str(affinity_group_name) + '',
AffinityGroup)
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : str
The offset string or object representing target conversion.
how : str
Method for down/re-sampling, default to 'mean' for downsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).mean()``, or
``.resample(...).apply(<func>)``
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
fill_method : str, default None
Filling method for upsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).<func>()``,
e.g. ``.resample(...).pad()``
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
limit : int, default None
Maximum size gap when reindexing with `fill_method`.
.. deprecated:: 0.18.0
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
.. versionadded:: 0.19.0
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
.. versionadded:: 0.19.0
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import (resample,
_maybe_process_deprecations)
axis = self._get_axis_number(axis)
r = resample(self, freq=rule, label=label, closed=closed,
axis=axis, kind=kind, loffset=loffset,
convention=convention,
base=base, key=on, level=level)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
def read_api_service(self, name, **kwargs): # noqa: E501
"""read_api_service # noqa: E501
read the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_api_service_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_api_service_with_http_info(name, **kwargs) # noqa: E501
return data
def as_unit(self, unit, location='suffix', *args, **kwargs):
"""Format subset as with units
:param unit: string to use as unit
:param location: prefix or suffix
:param subset: Pandas subset
"""
f = Formatter(
as_unit(unit, location=location),
args,
kwargs
)
return self._add_formatter(f)
def project_leave(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /project-xxxx/leave API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing#API-method%3A-%2Fproject-xxxx%2Fleave
"""
return DXHTTPRequest('/%s/leave' % object_id, input_params, always_retry=always_retry, **kwargs)
def list_files(tag='', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '' and 'ascii'.
If '' is specified, the primary data type (ascii) is loaded.
(default='')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and tag is not None:
if tag == '' or tag == 'ascii':
ascii_fmt = 'Density_3deg_{year:02d}_{doy:03d}.ascii'
return pysat.Files.from_os(data_path=data_path,
format_str=ascii_fmt)
else:
raise ValueError('Unrecognized tag name for CHAMP STAR')
elif format_str is None:
estr = 'A tag name must be passed to the loading routine for CHAMP'
raise ValueError(estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str)
def windows_k_distinct(x, k):
"""Find all largest windows containing exactly k distinct elements
:param x: list or string
:param k: positive integer
:yields: largest intervals [i, j) with len(set(x[i:j])) == k
:complexity: `O(|x|)`
"""
dist, i, j = 0, 0, 0 # dist = |{x[i], ..., x[j-1]}|
occ = {xi: 0 for xi in x} # number of occurrences in x[i:j]
while j < len(x):
while dist == k: # move start of interval
occ[x[i]] -= 1 # update counters
if occ[x[i]] == 0:
dist -= 1
i += 1
while j < len(x) and (dist < k or occ[x[j]]):
if occ[x[j]] == 0: # update counters
dist += 1
occ[x[j]] += 1
j += 1 # move end of interval
if dist == k:
yield (i, j)
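A small usage sketch, assuming the generator above is importable as-is; the expected windows follow from tracing the sliding window by hand.
# Each yielded pair (i, j) is a largest window x[i:j] with exactly k distinct items
for i, j in windows_k_distinct("abaac", 2):
    print((i, j), "abaac"[i:j])
# (0, 4) abaa
# (2, 5) aac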
def from_string(data_str):
"""Creates a MonsoonData object from a string representation generated
by __str__.
Args:
data_str: The string representation of a MonsoonData.
Returns:
A MonsoonData object.
"""
lines = data_str.strip().split('\n')
err_msg = ("Invalid input string format. Is this string generated by "
"MonsoonData class?")
conditions = [
len(lines) <= 4, "Average Current:" not in lines[1],
"Voltage: " not in lines[2], "Total Power: " not in lines[3],
"samples taken at " not in lines[4],
lines[5] != "Time" + ' ' * 7 + "Amp"
]
if any(conditions):
raise MonsoonError(err_msg)
hz_str = lines[4].split()[2]
hz = int(hz_str[:-2])
voltage_str = lines[2].split()[1]
voltage = int(voltage_str[:-1])
lines = lines[6:]
t = []
v = []
for l in lines:
try:
timestamp, value = l.split(' ')
t.append(int(timestamp))
v.append(float(value))
except ValueError:
raise MonsoonError(err_msg)
return MonsoonData(v, t, hz, voltage)
def new_main_mod(self,ns=None):
"""Return a new 'main' module object for user code execution.
"""
main_mod = self._user_main_module
init_fakemod_dict(main_mod,ns)
return main_mod
def nested_insert(self, item_list):
""" Create a series of nested LIVVDicts given a list """
if len(item_list) == 1:
self[item_list[0]] = LIVVDict()
elif len(item_list) > 1:
if item_list[0] not in self:
self[item_list[0]] = LIVVDict()
self[item_list[0]].nested_insert(item_list[1:])
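A short illustration of the resulting structure, assuming LIVVDict behaves like a plain dict subclass; the key names are made up.
d = LIVVDict()
d.nested_insert(["numerics", "bit_for_bit", "max_error"])
# Each list element becomes one level of nesting, ending in an empty LIVVDict:
# {'numerics': {'bit_for_bit': {'max_error': {}}}}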
def strip_filter(value):
'''
Strips HTML tags from strings according to SANITIZER_ALLOWED_TAGS,
SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in
settings.
Example usage:
{% load sanitizer %}
{{ post.content|strip_html }}
'''
if isinstance(value, basestring):
value = bleach.clean(value, tags=ALLOWED_TAGS,
attributes=ALLOWED_ATTRIBUTES,
styles=ALLOWED_STYLES, strip=True)
return value
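The underlying bleach call can be sketched on its own; the tag whitelist below is a made-up stand-in for the SANITIZER_ALLOWED_TAGS setting, and the ``styles`` keyword used above is only accepted by older bleach releases.
import bleach

# strip=True removes disallowed tags instead of escaping them
cleaned = bleach.clean('<b>bold</b> <script>alert(1)</script>',
                       tags=['b'], strip=True)
print(cleaned)  # roughly: <b>bold</b> alert(1)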
def getall(self, key, failobj=None):
"""Returns a list of all the matching values for key,
containing a single entry for unambiguous matches and
multiple entries for ambiguous matches."""
if self.mmkeys is None: self._mmInit()
k = self.mmkeys.get(key)
if not k: return failobj
return list(map(self.data.get, k))
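A rough sketch of the idea behind ``mmkeys`` (minimum-match keys): an abbreviation maps to every full key it could expand to, so ambiguous lookups return several values. The mapping below is invented for illustration and does not reproduce the class's internals.
data = {"verbose": 1, "version": 2, "quiet": 0}
# hypothetical minimum-match index: abbreviation -> list of matching full keys
mmkeys = {"ver": ["verbose", "version"], "verb": ["verbose"], "q": ["quiet"]}

def getall(key, failobj=None):
    k = mmkeys.get(key)
    if not k:
        return failobj
    return list(map(data.get, k))

print(getall("ver"))   # [1, 2]  (ambiguous match)
print(getall("verb"))  # [1]     (unambiguous match)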
def role_search(auth=None, **kwargs):
'''
Search roles
CLI Example:
.. code-block:: bash
salt '*' keystoneng.role_search
salt '*' keystoneng.role_search name=role1
salt '*' keystoneng.role_search domain_id=b62e76fbeeff4e8fb77073f591cf211e
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_roles(**kwargs)
def set_exception(self, exception):
"""Signal unsuccessful completion."""
was_handled = self._finish(self.errbacks, exception)
if not was_handled:
traceback.print_exception(
type(exception), exception, exception.__traceback__)
def desc(self, table):
'''Returns table description
>>> yql.desc('geo.countries')
>>>
'''
query = "desc {0}".format(table)
response = self.raw_query(query)
return response
def commit(self):
"""! @brief Write all collected data to flash.
This routine ensures that chip erase is only used once if either the auto mode or chip
erase mode are used. As an example, if two regions are to be written to and True was
passed to the constructor for chip_erase (or if the session option was set), then only
the first region will actually use chip erase. The second region will be forced to use
sector erase. This will not result in extra erasing, as sector erase always verifies whether
the sectors are already erased. This will, of course, also work correctly if the flash
algorithm for the first region doesn't actually erase the entire chip (all regions).
After calling this method, the loader instance can be reused to program more data.
"""
didChipErase = False
perfList = []
# Iterate over builders we've created and program the data.
for builder in sorted(self._builders.values(), key=lambda v: v.flash_start):
# Determine this builder's portion of total progress.
self._current_progress_fraction = builder.buffered_data_size / self._total_data_size
# Program the data.
chipErase = self._chip_erase if not didChipErase else False
perf = builder.program(chip_erase=chipErase,
progress_cb=self._progress_cb,
smart_flash=self._smart_flash,
fast_verify=self._trust_crc,
keep_unwritten=self._keep_unwritten)
perfList.append(perf)
didChipErase = True
self._progress_offset += self._current_progress_fraction
# Report programming statistics.
self._log_performance(perfList)
# Clear state to allow reuse.
self._reset_state()
def plot_gos(self, fout_img, goids=None, **kws_usr):
"""Plot GO IDs."""
gosubdagplot = self.get_gosubdagplot(goids, **kws_usr) # GoSubDagPlot
gosubdagplot.plt_dag(fout_img)
def deserialize_durable_record_to_durable_model(record, durable_model):
"""
Utility function that will take a Dynamo event record and turn it into the proper Durable Dynamo object.
This will properly deserialize the ugly Dynamo datatypes away.
:param record:
:param durable_model:
:return:
"""
# Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:
if record.get(EVENT_TOO_BIG_FLAG):
return get_full_durable_object(record['dynamodb']['Keys']['arn']['S'],
record['dynamodb']['NewImage']['eventTime']['S'],
durable_model)
new_image = remove_global_dynamo_specific_fields(record['dynamodb']['NewImage'])
data = {}
for item, value in new_image.items():
# This could end up as loss of precision
data[item] = DESER.deserialize(value)
return durable_model(**data)
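The ``DESER`` object is presumably boto3's DynamoDB TypeDeserializer; a minimal sketch of what that deserialization step does to the wire-format values, with a made-up record image:
from boto3.dynamodb.types import TypeDeserializer

deser = TypeDeserializer()
new_image = {"arn": {"S": "arn:aws:ec2:..."}, "count": {"N": "3"}}

data = {item: deser.deserialize(value) for item, value in new_image.items()}
print(data)  # {'arn': 'arn:aws:ec2:...', 'count': Decimal('3')}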
def get_process_flow(self, pid=None):
'''
get_process_flow(self, pid=None)
Get process in flow context. The response returns a sub-tree of the whole flow containing the requested process, its direct children processes, and all ancestors.
You can navigate within the flow backward and forward by running this call on the children or ancestors of a given process.
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
'''
pid = self._get_pid(pid)
return self._call_rest_api('get', '/processes/'+pid+'/flow', error='Failed to fetch process information')
def _create_technical_words_dictionary(spellchecker_cache_path,
relative_path,
user_words,
shadow):
"""Create Dictionary at spellchecker_cache_path with technical words."""
technical_terms_set = (user_words |
technical_words_from_shadow_contents(shadow))
technical_words = Dictionary(technical_terms_set,
"technical_words_" +
relative_path.replace(os.path.sep, "_"),
[os.path.realpath(relative_path)],
spellchecker_cache_path)
return technical_words
def auto_inline_code(self, node):
"""Try to automatically generate nodes for inline literals.
Parameters
----------
node : nodes.literal
Original codeblock node
Returns
-------
tocnode: docutils node
The converted toc tree node, None if conversion is not possible.
"""
assert isinstance(node, nodes.literal)
if len(node.children) != 1:
return None
content = node.children[0]
if not isinstance(content, nodes.Text):
return None
content = content.astext().strip()
if content.startswith('$') and content.endswith('$'):
if not self.config['enable_inline_math']:
return None
content = content[1:-1]
self.state_machine.reset(self.document,
node.parent,
self.current_level)
return self.state_machine.run_role('math', content=content)
else:
return None
def keypoint_vflip(kp, rows, cols):
"""Flip a keypoint vertically around the x-axis."""
x, y, angle, scale = kp
c = math.cos(angle)
s = math.sin(angle)
angle = math.atan2(-s, c)
return [x, (rows - 1) - y, angle, scale]
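A quick check of the flip with a made-up keypoint, assuming the function above is importable; the y coordinate is mirrored and the angle's sign flips because sine changes sign under a vertical flip.
import math

kp = [10, 2, math.pi / 4, 1.0]  # hypothetical keypoint: x, y, angle, scale
print(keypoint_vflip(kp, rows=100, cols=200))
# [10, 97, -0.7853981633974483, 1.0]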
def threads_init(gtk=True):
"""Enables multithreading support in Xlib and PyGTK.
See the module docstring for more info.
:Parameters:
gtk : bool
May be set to False to skip the PyGTK module.
"""
# enable X11 multithreading
x11.XInitThreads()
if gtk:
from gtk.gdk import threads_init
threads_init()
def write_roi(self, outfile=None,
save_model_map=False, **kwargs):
"""Write current state of the analysis to a file. This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file. A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.
Parameters
----------
outfile : str
String prefix of the output files. The extension of this
string will be stripped when generating the XML, YAML and
npy filenames.
make_plots : bool
Generate diagnostic plots.
save_model_map : bool
Save the current counts model to a FITS file.
"""
# extract the results in a convenient format
make_plots = kwargs.get('make_plots', False)
save_weight_map = kwargs.get('save_weight_map', False)
if outfile is None:
pathprefix = os.path.join(self.config['fileio']['workdir'],
'results')
elif not os.path.isabs(outfile):
pathprefix = os.path.join(self.config['fileio']['workdir'],
outfile)
else:
pathprefix = outfile
pathprefix = utils.strip_suffix(pathprefix,
['fits', 'yaml', 'npy'])
# pathprefix, ext = os.path.splitext(pathprefix)
prefix = os.path.basename(pathprefix)
xmlfile = pathprefix + '.xml'
fitsfile = pathprefix + '.fits'
npyfile = pathprefix + '.npy'
self.write_xml(xmlfile)
self.write_fits(fitsfile)
if not self.config['gtlike']['use_external_srcmap']:
for c in self.components:
c.like.logLike.saveSourceMaps(str(c.files['srcmap']))
if save_model_map:
self.write_model_map(prefix)
if save_weight_map:
self.write_weight_map(prefix)
o = {}
o['roi'] = copy.deepcopy(self._roi_data)
o['config'] = copy.deepcopy(self.config)
o['version'] = fermipy.__version__
o['stversion'] = fermipy.get_st_version()
o['sources'] = {}
for s in self.roi.sources:
o['sources'][s.name] = copy.deepcopy(s.data)
for i, c in enumerate(self.components):
o['roi']['components'][i][
'src_expscale'] = copy.deepcopy(c.src_expscale)
self.logger.info('Writing %s...', npyfile)
np.save(npyfile, o)
if make_plots:
self.make_plots(prefix, None,
**kwargs.get('plotting', {}))
def MapByteStream(self, byte_stream, **unused_kwargs): # pylint: disable=redundant-returns-doc
"""Maps the data type on a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
raise errors.MappingError(
'Unable to map {0:s} data type to byte stream'.format(
self._data_type_definition.TYPE_INDICATOR))
def get_mime_data(self, mime_type):
"""Return mime data previously attached to surface
using the specified mime type.
:param mime_type: The MIME type of the image data.
:type mime_type: ASCII string
:returns:
A CFFI buffer object, or :obj:`None`
if no data has been attached with the given mime type.
*New in cairo 1.10.*
"""
buffer_address = ffi.new('unsigned char **')
buffer_length = ffi.new('unsigned long *')
mime_type = ffi.new('char[]', mime_type.encode('utf8'))
cairo.cairo_surface_get_mime_data(
self._pointer, mime_type, buffer_address, buffer_length)
return (ffi.buffer(buffer_address[0], buffer_length[0])
if buffer_address[0] != ffi.NULL else None)
def create_volume(client, resource_group_name,
name, location,
template_file=None, template_uri=None):
"""Create a volume. """
volume_properties = None
if template_uri:
volume_properties = shell_safe_json_parse(_urlretrieve(template_uri).decode('utf-8'), preserve_order=True)
elif template_file:
volume_properties = get_file_json(template_file, preserve_order=True)
volume_properties = json.loads(json.dumps(volume_properties))
else:
raise CLIError('One of --template-file or --template-uri has to be specified')
volume_properties['location'] = location
return client.create(resource_group_name, name, volume_properties)
def cut_video_stream(stream, start, end, fmt):
""" cut video stream from `start` to `end` time
Parameters
----------
stream : bytes
video file content
start : float
start time
end : float
end time
Returns
-------
result : bytes
content of cut video
"""
with TemporaryDirectory() as tmp:
in_file = Path(tmp) / f"in{fmt}"
out_file = Path(tmp) / f"out{fmt}"
in_file.write_bytes(stream)
try:
ret = subprocess.run(
[
"ffmpeg",
"-ss",
f"{start}",
"-i",
f"{in_file}",
"-to",
f"{end}",
"-c",
"copy",
f"{out_file}",
],
capture_output=True,
)
except FileNotFoundError:
result = stream
else:
if ret.returncode:
result = stream
else:
result = out_file.read_bytes()
return result
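A hypothetical call, assuming ffmpeg is on PATH and a local file exists at the path shown; on any failure the original bytes come back unchanged.
from pathlib import Path

stream = Path("input.mp4").read_bytes()             # hypothetical source file
clip = cut_video_stream(stream, start=1.0, end=4.0, fmt=".mp4")
Path("clip.mp4").write_bytes(clip)                  # excerpt, or the original bytes on failure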
def getRandomBinaryTreeLeafNode(binaryTree):
"""Get random binary tree node.
"""
if binaryTree.internal == True:
if random.random() > 0.5:
return getRandomBinaryTreeLeafNode(binaryTree.left)
else:
return getRandomBinaryTreeLeafNode(binaryTree.right)
else:
return binaryTree
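A tiny stand-in tree to exercise the recursion; the node class here is invented, and only the ``internal``/``left``/``right`` attributes matter to the function above.
import random

class Node:
    def __init__(self, internal=False, left=None, right=None, name=""):
        self.internal = internal
        self.left = left
        self.right = right
        self.name = name

leaf_a, leaf_b = Node(name="a"), Node(name="b")
root = Node(internal=True, left=leaf_a, right=leaf_b)
print(getRandomBinaryTreeLeafNode(root).name)  # randomly "a" or "b"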
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
self.copied_files.append(prefixed_path)
def update_comment(self, comment_id, body):
"""
Update a specific comment. This can be used to edit the content of an
existing comment.
"""
path = '/msg/update_comment'
req = ET.Element('request')
ET.SubElement(req, 'comment_id').text = str(int(comment_id))
comment = ET.SubElement(req, 'comment')
ET.SubElement(comment, 'body').text = str(body)
return self._request(path, req)
def default_panels(institute_id, case_name):
"""Update default panels for a case."""
panel_ids = request.form.getlist('panel_ids')
controllers.update_default_panels(store, current_user, institute_id, case_name, panel_ids)
return redirect(request.referrer)
def asList(self):
""" returns a Point value as a list of [x,y,<z>,<m>] """
base = [self._x, self._y]
if not self._z is None:
base.append(self._z)
elif not self._m is None:
base.append(self._m)
return base
def intersection(self, *others):
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if new_multiplicity < multiplicity:
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity - new_multiplicity
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result
def set_basic_params(
self, workers=None, zerg_server=None, fallback_node=None, concurrent_events=None,
cheap_mode=None, stats_server=None, quiet=None, buffer_size=None,
fallback_nokey=None, subscription_key=None, emperor_command_socket=None):
"""
:param int workers: Number of worker processes to spawn.
:param str|unicode zerg_server: Attach the router to a zerg server.
:param str|unicode fallback_node: Fallback to the specified node in case of error.
:param int concurrent_events: Set the maximum number of concurrent events router can manage.
Default: system dependent.
:param bool cheap_mode: Enables cheap mode. When the router is in cheap mode,
it will not respond to requests until a node is available.
This means that when there are no nodes subscribed, only your local app (if any) will respond.
When all of the nodes go down, the router will return in cheap mode.
:param str|unicode stats_server: Router stats server address to run at.
:param bool quiet: Do not report failed connections to instances.
:param int buffer_size: Set internal buffer size in bytes. Default: page size.
:param bool fallback_nokey: Move to fallback node even if a subscription key is not found.
:param str|unicode subscription_key: Skip uwsgi parsing and directly set a key.
:param str|unicode emperor_command_socket: Set the emperor command socket that will receive spawn commands.
See `.empire.set_emperor_command_params()`.
"""
super(RouterFast, self).set_basic_params(**filter_locals(locals(), [
'fallback_nokey',
'subscription_key',
'emperor_command_socket',
]))
self._set_aliased('fallback-on-no-key', fallback_nokey, cast=bool)
self._set_aliased('force-key', subscription_key)
self._set_aliased('emperor-socket', emperor_command_socket)
return self
def accept(self, deviceId, device):
"""
Adds the named device to the store.
:param deviceId:
:param device:
:return:
"""
storedDevice = self.devices.get(deviceId)
if storedDevice is None:
logger.info('Initialising device ' + deviceId)
storedDevice = Device(self.maxAgeSeconds)
storedDevice.deviceId = deviceId
# this uses an async handler to decouple the recorder put (of the data) from the analyser handling that data
# thus the recorder will become free as soon as it has handed off the data. This means delivery is only
# guaranteed as long as the analyser stays up but this is not a system that sits on top of a bulletproof
# message bus so unlucky :P
storedDevice.dataHandler = AsyncHandler('analyser', CSVLogger('analyser', deviceId, self.dataDir))
else:
logger.debug('Pinged by device ' + deviceId)
storedDevice.payload = device
storedDevice.lastUpdateTime = datetime.datetime.utcnow()
# TODO if device has FAILED, do something?
self.devices.update({deviceId: storedDevice})
self.targetStateController.updateDeviceState(storedDevice.payload)
def find_span_binsearch(degree, knot_vector, num_ctrlpts, knot, **kwargs):
""" Finds the span of the knot over the input knot vector using binary search.
Implementation of Algorithm A2.1 from The NURBS Book by Piegl & Tiller.
The NURBS Book states that the knot span index always starts from zero, i.e. for a knot vector [0, 0, 1, 1];
if FindSpan returns 1, then the knot is between the interval [0, 1).
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param num_ctrlpts: number of control points, :math:`n + 1`
:type num_ctrlpts: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: knot span
:rtype: int
"""
# Get tolerance value
tol = kwargs.get('tol', 10e-6)
# In The NURBS Book; number of knots = m + 1, number of control points = n + 1, p = degree
# All knot vectors should follow the rule: m = p + n + 1
n = num_ctrlpts - 1
if abs(knot_vector[n + 1] - knot) <= tol:
return n
# Set max and min positions of the array to be searched
low = degree
high = num_ctrlpts
# The division could return a float value which makes it impossible to use as an array index
mid = (low + high) / 2
# Direct int casting would cause numerical errors due to discarding the significand figures (digits after the dot)
# The round function could return unexpected results, so we add the floating point with some small number
# This addition would solve the issues caused by the division operation and how Python stores float numbers.
# E.g. round(13/2) = 6 (expected to see 7)
mid = int(round(mid + tol))
# Search for the span
while (knot < knot_vector[mid]) or (knot >= knot_vector[mid + 1]):
if knot < knot_vector[mid]:
high = mid
else:
low = mid
mid = int((low + high) / 2)
return mid
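A worked call, assuming the function above is importable, using a clamped knot vector in the style of the examples in The NURBS Book; the values are chosen so the answer can be checked by eye.
degree = 2
knot_vector = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
num_ctrlpts = 8  # n + 1, consistent with m = p + n + 1 = 10

# u = 2.5 lies in the half-open interval [U[4], U[5]) = [2, 3), so the span is 4
print(find_span_binsearch(degree, knot_vector, num_ctrlpts, 2.5))  # 4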
def __raise_user_error(self, view):
"""
Raises an error if the given View has been set read only and the user attempted to edit its content.
:param view: View.
:type view: QWidget
"""
raise foundations.exceptions.UserError("{0} | Cannot perform action, '{1}' View has been set read only!".format(
self.__class__.__name__, view.objectName() or view))
def kitchen_get(backend, kitchen_name, recipe):
"""
Get an existing Kitchen
"""
found_kitchen = DKKitchenDisk.find_kitchen_name()
if found_kitchen is not None and len(found_kitchen) > 0:
raise click.ClickException("You cannot get a kitchen into an existing kitchen directory structure.")
if len(recipe) > 0:
click.secho("%s - Getting kitchen '%s' and the recipes %s" % (get_datetime(), kitchen_name, str(recipe)), fg='green')
else:
click.secho("%s - Getting kitchen '%s'" % (get_datetime(), kitchen_name), fg='green')
check_and_print(DKCloudCommandRunner.get_kitchen(backend.dki, kitchen_name, os.getcwd(), recipe))
def guess_wxr_version(self, tree):
"""
We will try to guess the wxr version used
to complete the wordpress xml namespace name.
"""
for v in ('1.2', '1.1', '1.0'):
try:
tree.find('channel/{%s}wxr_version' % (WP_NS % v)).text
return v
except AttributeError:
pass
raise CommandError('Cannot resolve the wordpress namespace')
def catalog_register(consul_url=None, token=None, **kwargs):
'''
Registers a new node, service, or check
:param consul_url: The Consul server URL.
:param dc: By default, the datacenter of the agent is queried;
however, the dc can be provided using the "dc" parameter.
:param node: The node to register.
:param address: The address of the node.
:param service: The service that will be registered.
:param service_address: The address that the service listens on.
:param service_port: The port for the service.
:param service_id: A unique identifier for the service, if this is not
provided "name" will be used.
:param service_tags: Any tags associated with the service.
:param check: The name of the health check to register
:param check_status: The initial status of the check,
must be one of unknown, passing, warning, or critical.
:param check_service: The service that the check is performed against.
:param check_id: Unique identifier for the service.
:param check_notes: An opaque field that is meant to hold human-readable text.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1'
'''
ret = {}
data = {}
data['NodeMeta'] = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
if 'datacenter' in kwargs:
data['Datacenter'] = kwargs['datacenter']
if 'node' in kwargs:
data['Node'] = kwargs['node']
else:
ret['message'] = 'Required argument node argument is missing.'
ret['res'] = False
return ret
if 'address' in kwargs:
if isinstance(kwargs['address'], list):
_address = kwargs['address'][0]
else:
_address = kwargs['address']
data['Address'] = _address
else:
ret['message'] = 'Required argument address argument is missing.'
ret['res'] = False
return ret
if 'ip_interfaces' in kwargs:
data['TaggedAddresses'] = {}
for k in kwargs['ip_interfaces']:
if kwargs['ip_interfaces'].get(k):
data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0]
if 'service' in kwargs:
data['Service'] = {}
data['Service']['Service'] = kwargs['service']
if 'service_address' in kwargs:
data['Service']['Address'] = kwargs['service_address']
if 'service_port' in kwargs:
data['Service']['Port'] = kwargs['service_port']
if 'service_id' in kwargs:
data['Service']['ID'] = kwargs['service_id']
if 'service_tags' in kwargs:
_tags = kwargs['service_tags']
if not isinstance(_tags, list):
_tags = [_tags]
data['Service']['Tags'] = _tags
if 'cpu' in kwargs:
data['NodeMeta']['Cpu'] = kwargs['cpu']
if 'num_cpus' in kwargs:
data['NodeMeta']['Cpu_num'] = kwargs['num_cpus']
if 'mem' in kwargs:
data['NodeMeta']['Memory'] = kwargs['mem']
if 'oscode' in kwargs:
data['NodeMeta']['Os'] = kwargs['oscode']
if 'osarch' in kwargs:
data['NodeMeta']['Osarch'] = kwargs['osarch']
if 'kernel' in kwargs:
data['NodeMeta']['Kernel'] = kwargs['kernel']
if 'kernelrelease' in kwargs:
data['NodeMeta']['Kernelrelease'] = kwargs['kernelrelease']
if 'localhost' in kwargs:
data['NodeMeta']['localhost'] = kwargs['localhost']
if 'nodename' in kwargs:
data['NodeMeta']['nodename'] = kwargs['nodename']
if 'os_family' in kwargs:
data['NodeMeta']['os_family'] = kwargs['os_family']
if 'lsb_distrib_description' in kwargs:
data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description']
if 'master' in kwargs:
data['NodeMeta']['master'] = kwargs['master']
if 'check' in kwargs:
data['Check'] = {}
data['Check']['Name'] = kwargs['check']
if 'check_status' in kwargs:
if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'):
ret['message'] = 'Check status must be unknown, passing, warning, or critical.'
ret['res'] = False
return ret
data['Check']['Status'] = kwargs['check_status']
if 'check_service' in kwargs:
data['Check']['ServiceID'] = kwargs['check_service']
if 'check_id' in kwargs:
data['Check']['CheckID'] = kwargs['check_id']
if 'check_notes' in kwargs:
data['Check']['Notes'] = kwargs['check_notes']
function = 'catalog/register'
res = _query(consul_url=consul_url,
function=function,
token=token,
method='PUT',
data=data)
if res['res']:
ret['res'] = True
ret['message'] = ('Catalog registration '
'for {0} successful.'.format(kwargs['node']))
else:
ret['res'] = False
ret['message'] = ('Catalog registration '
'for {0} failed.'.format(kwargs['node']))
ret['data'] = data
return ret | Registers a new node, service, or check
:param consul_url: The Consul server URL.
:param dc: By default, the datacenter of the agent is queried;
however, the dc can be provided using the "dc" parameter.
:param node: The node to register.
:param address: The address of the node.
:param service: The service that will be registered.
:param service_address: The address that the service listens on.
:param service_port: The port for the service.
:param service_id: A unique identifier for the service, if this is not
provided "name" will be used.
:param service_tags: Any tags associated with the service.
:param check: The name of the health check to register
:param check_status: The initial status of the check,
must be one of unknown, passing, warning, or critical.
:param check_service: The service that the check is performed against.
:param check_id: Unique identifier for the check.
:param check_notes: An opaque field that is meant to hold human-readable text.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' |
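A minimal sketch of the same registration done directly against Consul's HTTP API, outside of Salt. It assumes the `requests` package and a local agent at http://127.0.0.1:8500 with no ACL token; the payload mirrors the shape built by the function above, and PUT matches the method passed to the module's _query() helper.
import requests

def register_node_sketch(consul_url='http://127.0.0.1:8500'):
    payload = {
        'Node': 'node1',
        'Address': '192.168.1.1',
        'Service': {
            'Service': 'redis',
            'Address': '127.0.0.1',
            'Port': 8080,
            'ID': 'redis_server1',
            'Tags': ['cache'],
        },
    }
    # /v1/catalog/register expects a PUT with a JSON body.
    resp = requests.put('{0}/v1/catalog/register'.format(consul_url), json=payload)
    return resp.status_code == 200

if __name__ == '__main__':
    print(register_node_sketch())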
def get_shape(self, prune=False, hs_dims=None):
"""Tuple of array dimensions' lengths.
It returns a tuple of ints, each representing the length of a cube
dimension, in the order those dimensions appear in the cube.
Pruning is supported. Dimensions that get reduced to a single element
(e.g. due to pruning) are removed from the returned shape, thus
allowing for the differentiation between true 2D cubes (over which
statistical testing can be performed) and essentially
1D cubes (over which it can't).
Usage:
>>> shape = get_shape()
>>> pruned_shape = get_shape(prune=True)
"""
if not prune:
return self.as_array(include_transforms_for_dims=hs_dims).shape
shape = compress_pruned(
self.as_array(prune=True, include_transforms_for_dims=hs_dims)
).shape
# Eliminate dimensions that get reduced to 1
# (e.g. single element categoricals)
return tuple(n for n in shape if n > 1) | Tuple of array dimensions' lengths.
It returns a tuple of ints, each representing the length of a cube
dimension, in the order those dimensions appear in the cube.
Pruning is supported. Dimensions that get reduced to a single element
(e.g. due to pruning) are removed from the returning shape, thus
allowing for the differentiation between true 2D cubes (over which
statistical testing can be performed) and essentially
1D cubes (over which it can't).
Usage:
>>> shape = get_shape()
>>> pruned_shape = get_shape(prune=True) |
def check_key(user,
key,
enc,
comment,
options,
config='.ssh/authorized_keys',
cache_keys=None,
fingerprint_hash_type=None):
'''
Check to see if a key needs updating, returns "update", "add" or "exists"
CLI Example:
.. code-block:: bash
salt '*' ssh.check_key <user> <key> <enc> <comment> <options>
'''
if cache_keys is None:
cache_keys = []
enc = _refine_enc(enc)
current = auth_keys(user,
config=config,
fingerprint_hash_type=fingerprint_hash_type)
nline = _format_auth_line(key, enc, comment, options)
# Removing existing keys from the auth_keys isn't really a good idea
# in fact
#
# as:
# - We can have non-salt managed keys in that file
# - We can have multiple states defining keys for a user
# and with such code only one state will win
# the remove all-other-keys war
#
# if cache_keys:
# for pub_key in set(current).difference(set(cache_keys)):
# rm_auth_key(user, pub_key)
if key in current:
cline = _format_auth_line(key,
current[key]['enc'],
current[key]['comment'],
current[key]['options'])
if cline != nline:
return 'update'
else:
return 'add'
return 'exists' | Check to see if a key needs updating, returns "update", "add" or "exists"
CLI Example:
.. code-block:: bash
salt '*' ssh.check_key <user> <key> <enc> <comment> <options> |
def ppo_original_params():
"""Parameters based on the original PPO paper."""
hparams = ppo_atari_base()
hparams.learning_rate_constant = 2.5e-4
hparams.gae_gamma = 0.99
hparams.gae_lambda = 0.95
hparams.clipping_coef = 0.1
hparams.value_loss_coef = 1
hparams.entropy_loss_coef = 0.01
hparams.eval_every_epochs = 200
hparams.dropout_ppo = 0.1
# The parameters below are modified to accommodate short epoch_length (which
# is needed for model based rollouts).
hparams.epoch_length = 50
hparams.optimization_batch_size = 20
return hparams | Parameters based on the original PPO paper. |
def delete(self, *keys):
"""Emulate delete."""
key_counter = 0
for key in map(self._encode, keys):
if key in self.redis:
del self.redis[key]
key_counter += 1
if key in self.timeouts:
del self.timeouts[key]
return key_counter | Emulate delete. |
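A self-contained harness (hypothetical, for illustration only) exercising the delete() emulation above: keys present in the backing dict are removed and counted, missing keys are ignored, and any associated timeout entry is dropped as well. The real emulator encodes keys to bytes; str() is enough for this sketch.
class FakeRedisSketch(object):
    def __init__(self):
        self.redis = {}
        self.timeouts = {}

    def _encode(self, key):
        # Stand-in for the real byte encoding.
        return str(key)

    def delete(self, *keys):
        key_counter = 0
        for key in map(self._encode, keys):
            if key in self.redis:
                del self.redis[key]
                key_counter += 1
            if key in self.timeouts:
                del self.timeouts[key]
        return key_counter

r = FakeRedisSketch()
r.redis = {'a': 1, 'b': 2}
r.timeouts = {'a': 30}
assert r.delete('a', 'missing') == 1          # only 'a' existed
assert r.redis == {'b': 2} and r.timeouts == {}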
def QA_util_random_with_zh_stock_code(stockNumber=10):
'''
随机生成股票代码
:param stockNumber: 生成个数
:return: ['60XXXX', '00XXXX', '300XXX']
'''
codeList = []
pt = 0
for i in range(stockNumber):
if pt == 0:
#print("random 60XXXX")
iCode = random.randint(600000, 609999)
aCode = "%06d" % iCode
elif pt == 1:
#print("random 00XXXX")
iCode = random.randint(600000, 600999)
aCode = "%06d" % iCode
elif pt == 2:
#print("random 00XXXX")
iCode = random.randint(2000, 9999)
aCode = "%06d" % iCode
elif pt == 3:
#print("random 300XXX")
iCode = random.randint(300000, 300999)
aCode = "%06d" % iCode
elif pt == 4:
#print("random 00XXXX")
iCode = random.randint(2000, 2999)
aCode = "%06d" % iCode
pt = (pt + 1) % 5
codeList.append(aCode)
return codeList | 随机生成股票代码
:param stockNumber: 生成个数
:return: ['60XXXX', '00XXXX', '300XXX'] |
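Usage sketch. The import path below is an assumption (QUANTAXIS ships a helper with this name); if it is unavailable, the zero-padding idea behind the codes is reproduced inline instead.
try:
    from QUANTAXIS.QAUtil import QA_util_random_with_zh_stock_code
    print(QA_util_random_with_zh_stock_code(5))
except ImportError:
    import random
    # "%06d" left-pads with zeros, so e.g. 2345 becomes '002345'.
    print(["%06d" % random.randint(2000, 9999) for _ in range(5)])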
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler:
"""
Return connection handler instance for the endpoint
:param session: AIOHTTP client session instance
:param proxy: Proxy url
:return:
"""
return ConnectionHandler("https", "wss", self.server, self.port, "", session, proxy) | Return connection handler instance for the endpoint
:param session: AIOHTTP client session instance
:param proxy: Proxy url
:return: |
def colors_to_dict(colors, img):
"""Convert list of colors to pywal format."""
return {
"wallpaper": img,
"alpha": util.Color.alpha_num,
"special": {
"background": colors[0],
"foreground": colors[15],
"cursor": colors[15]
},
"colors": {
"color0": colors[0],
"color1": colors[1],
"color2": colors[2],
"color3": colors[3],
"color4": colors[4],
"color5": colors[5],
"color6": colors[6],
"color7": colors[7],
"color8": colors[8],
"color9": colors[9],
"color10": colors[10],
"color11": colors[11],
"color12": colors[12],
"color13": colors[13],
"color14": colors[14],
"color15": colors[15]
}
} | Convert list of colors to pywal format. |
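An illustration of the expected input/output shape. A real call goes through pywal's util module for the alpha value; here it is hard-coded and the wallpaper path is a placeholder so the sketch stands alone.
palette = ['#1d2021'] + ['#%02x%02x%02x' % (i * 15, i * 15, i * 15) for i in range(1, 16)]

scheme = {
    'wallpaper': '/path/to/img.png',   # placeholder path
    'alpha': '100',                    # pywal stores alpha as a number string
    'special': {
        'background': palette[0],
        'foreground': palette[15],
        'cursor': palette[15],
    },
    'colors': {'color%d' % i: palette[i] for i in range(16)},
}
print(scheme['special']['background'], scheme['colors']['color15'])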
def _set_compact_flash(self, v, load=False):
"""
Setter method for compact_flash, mapped from YANG variable /system_monitor/compact_flash (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_compact_flash is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_compact_flash() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=compact_flash.compact_flash, is_container='container', presence=False, yang_name="compact-flash", rest_name="compact-flash", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold for component:COMPACT-FLASH', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """compact_flash must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=compact_flash.compact_flash, is_container='container', presence=False, yang_name="compact-flash", rest_name="compact-flash", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold for component:COMPACT-FLASH', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__compact_flash = t
if hasattr(self, '_set'):
self._set() | Setter method for compact_flash, mapped from YANG variable /system_monitor/compact_flash (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_compact_flash is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_compact_flash() directly. |
def confirm_commit(jid):
'''
.. versionadded:: 2019.2.0
Confirm a commit scheduled to be reverted via the ``revert_in`` and
``revert_at`` arguments from the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` or
:mod:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit confirmed
is scheduled via the functions named above.
CLI Example:
.. code-block:: bash
salt '*' net.confirm_commit 20180726083540640360
'''
if __grains__['os'] == 'junos':
# Confirm the commit, by committing (i.e., invoking the RPC call)
confirmed = __salt__['napalm.junos_commit']()
confirmed['result'] = confirmed.pop('out')
confirmed['comment'] = confirmed.pop('message')
else:
confirmed = cancel_commit(jid)
if confirmed['result']:
confirmed['comment'] = 'Commit #{jid} confirmed.'.format(jid=jid)
return confirmed | .. versionadded:: 2019.2.0
Confirm a commit scheduled to be reverted via the ``revert_in`` and
``revert_at`` arguments from the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` or
:mod:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit confirmed
is scheduled via the functions named above.
CLI Example:
.. code-block:: bash
salt '*' net.confirm_commit 20180726083540640360 |
async def Track(self, payloads):
'''
payloads : typing.Sequence[~Payload]
Returns -> typing.Sequence[~PayloadResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='PayloadsHookContext',
request='Track',
version=1,
params=_params)
_params['payloads'] = payloads
reply = await self.rpc(msg)
return reply | payloads : typing.Sequence[~Payload]
Returns -> typing.Sequence[~PayloadResult] |
def _perturbation(self):
"""
Returns Gaussian perturbation
"""
if self.P>1:
scales = []
for term_i in range(self.n_terms):
_scales = SP.randn(self.diag[term_i].shape[0])
if self.offset[term_i]>0:
_scales = SP.concatenate((_scales,SP.zeros(1)))
scales.append(_scales)
scales = SP.concatenate(scales)
else:
scales = SP.randn(self.vd.getNumberScales())
return scales | Returns Gaussian perturbation |
def cmd_rcbind(self, args):
'''start RC bind'''
if len(args) < 1:
print("Usage: rcbind <dsmmode>")
return
self.master.mav.command_long_send(self.settings.target_system,
self.settings.target_component,
mavutil.mavlink.MAV_CMD_START_RX_PAIR,
0,
float(args[0]), 0, 0, 0, 0, 0, 0) | start RC bind |
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.arrays.categorical import _factorize_from_iterables
codes, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, codes=codes, sortorder=sortorder,
names=names, verify_integrity=False) | Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color']) |
def p_const_map(self, p):
'''const_map : '{' const_map_seq '}' '''
p[0] = ast.ConstMap(dict(p[2]), p.lineno(1)) | const_map : '{' const_map_seq '}' |
def vor_plot(self, which='vor'):
"""
Voronoi diagram visualizations. There are three types:
1. **vor**: Voronoi diagram of the Solar Horizont.
2. **freq**: Frequency of Sun positions in t in the Voronoi
diagram of the Solar Horizont.
3. **data**: Accumulated time integral of the data projected
in the Voronoi diagram of the Solar Horizont.
:param which: Type of visualization.
:type which: str
:returns: None
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
sm = self.SM
if sm.light_vor is None:
raise ValueError('The computation has not been made yet')
if which == 'vor':
title = 'Voronoi diagram of the Solar Horizont'
ax = sm.vor_surf.plot2d('b', alpha=0.15, ret=True, title=title)
ax.scatter(sm.azimuth_zenit[:, 0],sm.azimuth_zenit[:, 1], c='k')
ax.scatter(sm.vor_centers[:, 0], sm.vor_centers[:,1],
s = 30, c = 'red')
ax.set_xlabel('Solar Azimuth')
ax.set_ylabel('Solar Zenit')
plt.show()
elif which == 'freq':
cmap = cm.Blues
title = 'Frequency of Sun positions in the Voronoi diagram '+\
'of the Solar Horizont'
ax = sm.vor_surf.plot2d(sm.vor_freq, cmap=cmap, alpha=0.85,
colorbar=True, title=title, ret=True,
cbar_label=' Freq')
ax.set_xlabel('Solar Azimuth')
ax.set_ylabel('Solar Zenit')
plt.show()
elif which == 'data':
cmap = cm.YlOrRd
title = 'Data projected in the Voronoi diagram of the'+\
' Solar Horizont'
data = sm.proj_vor/sm.vor_freq
proj_data = data*100/data.max()
ax = sm.vor_surf.plot2d(proj_data, alpha=0.85, cmap=cmap,
colorbar=True, title=title, ret=True,
cbar_label='%')
ax.set_xlabel('Solar Azimuth')
ax.set_ylabel('Solar Zenit')
plt.title('max = '+str(data.max())+' kilounits*hour')
plt.show()
else:
raise ValueError('Invalid plot '+which) | Voronoi diagram visualizations. There are three types:
1. **vor**: Voronoi diagram of the Solar Horizont.
2. **freq**: Frequency of Sun positions in t in the Voronoi
diagram of the Solar Horizont.
3. **data**: Accumulated time integral of the data projected
in the Voronoi diagram of the Solar Horizont.
:param which: Type of visualization.
:type which: str
:returns: None |
def _int2coord(x, y, dim):
"""Convert x, y values in dim x dim-grid coordinate system into lng, lat values.
Parameters:
x: int x value of point [0, dim); corresponds to longitude
y: int y value of point [0, dim); corresponds to latitude
dim: int Number of coding points each x, y value can take.
Corresponds to 2^level of the hilbert curve.
Returns:
Tuple[float, float]: (lng, lat)
lng longitude value of coordinate [-180.0, 180.0]; corresponds to X axis
lat latitude value of coordinate [-90.0, 90.0]; corresponds to Y axis
"""
assert dim >= 1
assert x < dim
assert y < dim
lng = x / dim * 360 - 180
lat = y / dim * 180 - 90
return lng, lat | Convert x, y values in dim x dim-grid coordinate system into lng, lat values.
Parameters:
x: int x value of point [0, dim); corresponds to longitude
y: int y value of point [0, dim); corresponds to latitude
dim: int Number of coding points each x, y value can take.
Corresponds to 2^level of the hilbert curve.
Returns:
Tuple[float, float]: (lng, lat)
lng longitude value of coordinate [-180.0, 180.0]; corresponds to X axis
lat latitude value of coordinate [-90.0, 90.0]; corresponds to Y axis |
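A worked example of the grid-to-degrees mapping above (Python 3 division assumed): with dim = 2**3 = 8, cell (x=4, y=4) maps to the origin (0.0, 0.0), and (0, 0) maps to the south-west corner.
def int2coord_sketch(x, y, dim):
    lng = x / dim * 360 - 180
    lat = y / dim * 180 - 90
    return lng, lat

print(int2coord_sketch(4, 4, 8))   # (0.0, 0.0)
print(int2coord_sketch(0, 0, 8))   # (-180.0, -90.0)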
def decode_xml(elem, _in_bind = False):
""" Decodes an XML element into an OpenMath object.
:param elem: Element to decode.
:type elem: etree._Element
:param _in_bind: Internal flag used to indicate if we should decode within
an OMBind.
:type _in_bind: bool
:rtype: OMAny
"""
obj = xml.tag_to_object(elem)
attrs = {}
def a2d(*props):
for p in props:
attrs[p] = elem.get(p)
if issubclass(obj, om.CommonAttributes):
a2d("id")
if issubclass(obj, om.CDBaseAttribute):
a2d("cdbase")
# Root Object
if issubclass(obj, om.OMObject):
a2d("version")
attrs["omel"] = decode_xml(elem[0])
# Reference Objects
elif issubclass(obj, om.OMReference):
a2d("href")
# Basic Objects
elif issubclass(obj, om.OMInteger):
attrs["integer"] = int(elem.text)
elif issubclass(obj, om.OMFloat):
# TODO: Support Hex
attrs["double"] = float(elem.get('dec'))
elif issubclass(obj, om.OMString):
attrs["string"] = elem.text
elif issubclass(obj, om.OMBytes):
try:
attrs["bytes"] = base64.b64decode(elem.text)
except TypeError:
attrs["bytes"] = base64.b64decode(bytes(elem.text, "ascii"))
elif issubclass(obj, om.OMSymbol):
a2d("name", "cd")
elif issubclass(obj, om.OMVariable):
a2d("name")
# Derived Elements
elif issubclass(obj, om.OMForeign):
attrs["obj"] = elem.text
a2d("encoding")
# Compound Elements
elif issubclass(obj, om.OMApplication):
attrs["elem"] = decode_xml(elem[0])
attrs["arguments"] = list(map(decode_xml, elem[1:]))
elif issubclass(obj, om.OMAttribution):
attrs["pairs"] = decode_xml(elem[0])
attrs["obj"] = decode_xml(elem[1])
elif issubclass(obj, om.OMAttributionPairs):
if not _in_bind:
attrs["pairs"] = [(decode_xml(k), decode_xml(v)) for k, v in zip(elem[::2], elem[1::2])]
else:
obj = om.OMAttVar
attrs["pairs"] = decode_xml(elem[0], True)
attrs["obj"] = decode_xml(elem[1], True)
elif issubclass(obj, om.OMBinding):
attrs["binder"] = decode_xml(elem[0])
attrs["vars"] = decode_xml(elem[1])
attrs["obj"] = decode_xml(elem[2])
elif issubclass(obj, om.OMBindVariables):
attrs["vars"] = list(map(lambda x:decode_xml(x, True), elem[:]))
elif issubclass(obj, om.OMError):
attrs["name"] = decode_xml(elem[0])
attrs["params"] = list(map(decode_xml, elem[1:]))
else:
raise TypeError("Expected OMAny, found %s." % obj.__name__)
return obj(**attrs) | Decodes an XML element into an OpenMath object.
:param elem: Element to decode.
:type elem: etree._Element
:param _in_bind: Internal flag used to indicate if we should decode within
an OMBind.
:type _in_bind: bool
:rtype: OMAny |
def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
"Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
if getattr(self,'norm',False): raise Exception('Can not call normalize twice')
if stats is None: self.stats = self.batch_stats()
else: self.stats = stats
self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
self.add_tfm(self.norm)
return self | Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`) |
def read_data(self, **kwargs):
"""
get the data from the service
:param kwargs: contain keyword args : trigger_id at least
:type kwargs: dict
:rtype: list
"""
now = arrow.utcnow().to(settings.TIME_ZONE)
my_toots = []
search = {}
since_id = None
trigger_id = kwargs['trigger_id']
date_triggered = arrow.get(kwargs['date_triggered'])
def _get_toots(toot_api, toot_obj, search):
"""
get the toots from mastodon and return the filters to use
:param toot_obj: from Mastodon model
:param search: filter used for MastodonAPI.search()
:type toot_obj: Object ServiceMastodon
:type search: dict
:return: the filter named search, the toots
:rtype: list
"""
max_id = 0 if toot_obj.max_id is None else toot_obj.max_id
since_id = 0 if toot_obj.since_id is None else toot_obj.since_id
# get the toots for a given tag
statuses = ''
if toot_obj.tag:
search['q'] = toot_obj.tag
# do a search
statuses = toot_api.search(**search)
# just return the content of the statuses array
statuses = statuses['statuses']
# get the toots from a given user
elif toot_obj.tooter:
search['id'] = toot_obj.tooter
# call the user timeline and get his toot
if toot_obj.fav:
statuses = toot_api.favourites(max_id=max_id,
since_id=since_id)
else:
user_id = toot_api.account_search(q=toot_obj.tooter)
statuses = toot_api.account_statuses(
id=user_id[0]['id'], max_id=toot_obj.max_id,
since_id=toot_obj.since_id)
return search, statuses
if self.token is not None:
kw = {'app_label': 'th_mastodon', 'model_name': 'Mastodon', 'trigger_id': trigger_id}
toot_obj = super(ServiceMastodon, self).read_data(**kw)
us = UserService.objects.get(token=self.token, name='ServiceMastodon')
try:
toot_api = MastodonAPI(
client_id=us.client_id,
client_secret=us.client_secret,
access_token=self.token,
api_base_url=us.host,
)
except ValueError as e:
logger.error(e)
update_result(trigger_id, msg=e, status=False)
if toot_obj.since_id is not None and toot_obj.since_id > 0:
since_id = toot_obj.since_id
search = {'since_id': toot_obj.since_id}
# first request to Mastodon
search, statuses = _get_toots(toot_api, toot_obj, search)
if len(statuses) > 0:
newest = None
for status in statuses:
if newest is None:
newest = True
# first query ; get the max id
search['max_id'] = max_id = status['id']
since_id = search['since_id'] = statuses[-1]['id'] - 1
search, statuses = _get_toots(toot_api, toot_obj, search)
newest = None
if len(statuses) > 0:
my_toots = []
for s in statuses:
if newest is None:
newest = True
max_id = s['id'] - 1
toot_name = s['account']['username']
# get the text of the tweet + url to this one
title = _('Toot from <a href="{}">@{}</a>'.
format(us.host, toot_name))
my_date = arrow.get(s['created_at']).to(
settings.TIME_ZONE)
published = arrow.get(my_date).to(settings.TIME_ZONE)
if date_triggered is not None and \
published is not None and \
now >= published >= date_triggered:
my_toots.append({'title': title,
'content': s['content'],
'link': s['url'],
'my_date': my_date})
# digester
self.send_digest_event(trigger_id, title, s['url'])
cache.set('th_mastodon_' + str(trigger_id), my_toots)
Mastodon.objects.filter(trigger_id=trigger_id).update(
since_id=since_id, max_id=max_id)
return my_toots | get the data from the service
:param kwargs: contain keyword args : trigger_id at least
:type kwargs: dict
:rtype: list |
def import_batch(self, filename):
"""Imports the batch of outgoing transactions into
model IncomingTransaction.
"""
batch = self.batch_cls()
json_file = self.json_file_cls(name=filename, path=self.path)
try:
deserialized_txs = json_file.deserialized_objects
except JSONFileError as e:
raise TransactionImporterError(e) from e
try:
batch.populate(deserialized_txs=deserialized_txs, filename=json_file.name)
except (
BatchDeserializationError,
InvalidBatchSequence,
BatchAlreadyProcessed,
) as e:
raise TransactionImporterError(e) from e
batch.save()
batch.update_history()
return batch | Imports the batch of outgoing transactions into
model IncomingTransaction. |
def run_evaluate(self, *args, **kwargs) -> None:
"""
Evaluates the current item
:returns An evaluation result object containing the result, or reasons why
evaluation failed
"""
if self._needs_evaluation:
for _, item in self._nested_items.items():
item.run_evaluate() | Evaluates the current item
:returns An evaluation result object containing the result, or reasons why
evaluation failed |
def _get_cached_style_urls(self, asset_url_path):
"""
Gets the URLs of the cached styles.
"""
try:
cached_styles = os.listdir(self.cache_path)
except IOError as ex:
if ex.errno != errno.ENOENT and ex.errno != errno.ESRCH:
raise
return []
except OSError:
return []
return [posixpath.join(asset_url_path, style)
for style in cached_styles
if style.endswith('.css')] | Gets the URLs of the cached styles. |
def _dt_to_epoch(dt):
"""Convert datetime to epoch seconds."""
try:
epoch = dt.timestamp()
except AttributeError: # py2
epoch = (dt - datetime(1970, 1, 1)).total_seconds()
return epoch | Convert datetime to epoch seconds. |
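A demonstration of the py2 fallback arithmetic only (a sketch). Note that for naive datetimes the two branches are not strictly equivalent: datetime.timestamp() treats naive values as local time, while the subtraction counts seconds from a naive 1970-01-01 epoch.
from datetime import datetime

dt = datetime(2020, 1, 1, 0, 0, 0)
epoch_seconds = (dt - datetime(1970, 1, 1)).total_seconds()
print(epoch_seconds)   # 1577836800.0 counted from the naive epoch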
def validate_registry_uri_authority(auth: str) -> None:
"""
Raise an exception if the authority is not a valid ENS domain
or a valid checksummed contract address.
"""
if is_ens_domain(auth) is False and not is_checksum_address(auth):
raise ValidationError(f"{auth} is not a valid registry URI authority.") | Raise an exception if the authority is not a valid ENS domain
or a valid checksummed contract address. |
def _get_data_bytes_or_stream_only(param_name, param_value):
'''Validates the request body passed in is a stream/file-like or bytes
object.'''
if param_value is None:
return b''
if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) | Validates the request body passed in is a stream/file-like or bytes
object. |
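The three accepted shapes in practice, shown with a standalone copy of the check (the library's error-message constant is replaced with a plain string here so the sketch runs on its own).
import io

def get_data_bytes_or_stream_only(param_name, param_value):
    if param_value is None:
        return b''
    if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
        return param_value
    raise TypeError('{0} should be bytes or a stream.'.format(param_name))

print(get_data_bytes_or_stream_only('body', None))              # b''
print(get_data_bytes_or_stream_only('body', b'payload'))        # b'payload'
print(get_data_bytes_or_stream_only('body', io.BytesIO(b'x')))  # stream passes through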
def _init_fld2col_widths(self):
"""Return default column widths for writing an Excel Spreadsheet."""
# GO info namedtuple fields: NS dcnt level depth GO D1 name
# GO header namedtuple fields: format_txt hdr_idx
fld2col_widths = GoSubDagWr.fld2col_widths.copy()
for fld, wid in self.oprtfmt.default_fld2col_widths.items():
fld2col_widths[fld] = wid
for fld in get_hdridx_flds():
fld2col_widths[fld] = 2
return fld2col_widths | Return default column widths for writing an Excel Spreadsheet. |
def authenticate(self, request):
"""
Authenticate a user from a token form field
Errors thrown here will be swallowed by django-rest-framework, and it
expects us to return None if authentication fails.
"""
try:
key = request.data['token']
except KeyError:
return
try:
token = AuthToken.objects.get(key=key)
except AuthToken.DoesNotExist:
return
return (token.user, token) | Authenticate a user from a token form field
Errors thrown here will be swallowed by django-rest-framework, and it
expects us to return None if authentication fails. |
def _shrink(self):
"""
Shrinks the dynamic table to be at or below maxsize
"""
cursize = self._current_size
while cursize > self._maxsize:
name, value = self.dynamic_entries.pop()
cursize -= table_entry_size(name, value)
self._current_size = cursize | Shrinks the dynamic table to be at or below maxsize |
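A standalone sketch of the eviction loop above. In HPACK an entry's size is len(name) + len(value) + 32 octets of overhead; the helper name mirrors the table_entry_size() used by the real class, but everything here is self-contained, and the oldest entries sit at the right end of the deque.
from collections import deque

def table_entry_size(name, value):
    return len(name) + len(value) + 32

def shrink(entries, maxsize):
    """Pop the oldest entries (right end of the deque) until the table fits."""
    cursize = sum(table_entry_size(n, v) for n, v in entries)
    while cursize > maxsize:
        name, value = entries.pop()
        cursize -= table_entry_size(name, value)
    return cursize

table = deque([(b'newer', b'1'), (b'older-header', b'some-longer-value')])
print(shrink(table, maxsize=40), list(table))   # 38 [(b'newer', b'1')]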
def pretty_print(self, indent=0):
"""Print the document without tags using indentation
"""
s = tab = ' '*indent
s += '%s: ' %self.tag
if isinstance(self.value, basestring):
s += self.value
else:
s += '\n'
for e in self.value:
s += e.pretty_print(indent+4)
s += '\n'
return s | Print the document without tags using indentation |
def _get_char(self, win, char):
def get_check_next_byte():
char = win.getch()
if 128 <= char <= 191:
return char
else:
raise UnicodeError
bytes = []
if char <= 127:
# 1 bytes
bytes.append(char)
#elif 194 <= char <= 223:
elif 192 <= char <= 223:
# 2 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
elif 224 <= char <= 239:
# 3 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
elif 240 <= char <= 244:
# 4 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
#print('bytes = {}'.format(bytes))
""" no zero byte allowed """
while 0 in bytes:
bytes.remove(0)
if version_info < (3, 0):
out = ''.join([chr(b) for b in bytes])
else:
buf = bytearray(bytes)
out = self._decode_string(buf)
#out = buf.decode('utf-8')
return out | no zero byte allowed |
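A self-contained sketch of the same idea: the UTF-8 lead byte determines how many continuation bytes (0x80-0xBF) follow, and the collected bytes are then decoded. The curses-specific parts (win.getch(), the zero-byte filtering, the py2 branch) are left out.
def assemble_utf8(byte_iter):
    first = next(byte_iter)
    if first <= 0x7F:
        count = 0          # 1-byte ASCII
    elif 0xC0 <= first <= 0xDF:
        count = 1          # 2-byte sequence
    elif 0xE0 <= first <= 0xEF:
        count = 2          # 3-byte sequence
    elif 0xF0 <= first <= 0xF4:
        count = 3          # 4-byte sequence
    else:
        raise UnicodeError('invalid lead byte: %r' % first)
    buf = bytearray([first])
    for _ in range(count):
        nxt = next(byte_iter)
        if not 0x80 <= nxt <= 0xBF:
            raise UnicodeError('invalid continuation byte: %r' % nxt)
        buf.append(nxt)
    return buf.decode('utf-8')

print(assemble_utf8(iter([0xE2, 0x82, 0xAC])))   # '€'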
def mean(name, num, minimum=0, maximum=0, ref=None):
'''
Calculates the mean of the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.mean:
- name: myregentry
- num: 5
'''
return calc(
name=name,
num=num,
oper='mean',
minimum=minimum,
maximum=maximum,
ref=ref
) | Calculates the mean of the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.mean:
- name: myregentry
- num: 5 |
def _parse_selector(self, scoped=True, allow_periods_in_scope=False):
"""Parse a (possibly scoped) selector.
A selector is a sequence of one or more valid Python-style identifiers
separated by periods (see also `SelectorMap`). A scoped selector is a
selector that may be preceded by scope names (separated by slashes).
Args:
scoped: Whether scopes are allowed.
allow_periods_in_scope: Whether to allow period characters in the scope
names preceding the selector.
Returns:
The parsed selector (as a string).
Raises:
SyntaxError: If the scope or selector is malformatted.
"""
if self._current_token.kind != tokenize.NAME:
self._raise_syntax_error('Unexpected token.')
begin_line_num = self._current_token.begin[0]
begin_char_num = self._current_token.begin[1]
end_char_num = self._current_token.end[1]
line = self._current_token.line
selector_parts = []
# This accepts an alternating sequence of NAME and '/' or '.' tokens.
step_parity = 0
while (step_parity == 0 and self._current_token.kind == tokenize.NAME or
step_parity == 1 and self._current_token.value in ('/', '.')):
selector_parts.append(self._current_token.value)
step_parity = not step_parity
end_char_num = self._current_token.end[1]
self._advance_one_token()
self._skip_whitespace_and_comments()
# Due to tokenization, most whitespace has been stripped already. To prevent
# whitespace inside the scoped selector, we verify that it matches an
# untokenized version of the selector obtained from the first through last
# character positions of the consumed tokens in the line being parsed.
scoped_selector = ''.join(selector_parts)
untokenized_scoped_selector = line[begin_char_num:end_char_num]
# Also check that it's properly formatted (e.g., no consecutive slashes).
scope_re = IDENTIFIER_RE
if allow_periods_in_scope:
scope_re = MODULE_RE
selector_re = MODULE_RE
scope_parts = scoped_selector.split('/')
valid_format = all(scope_re.match(scope) for scope in scope_parts[:-1])
valid_format &= bool(selector_re.match(scope_parts[-1]))
valid_format &= bool(scoped or len(scope_parts) == 1)
if untokenized_scoped_selector != scoped_selector or not valid_format:
location = (self._filename, begin_line_num, begin_char_num + 1, line)
self._raise_syntax_error('Malformatted scope or selector.', location)
return scoped_selector | Parse a (possibly scoped) selector.
A selector is a sequence of one or more valid Python-style identifiers
separated by periods (see also `SelectorMap`). A scoped selector is a
selector that may be preceded by scope names (separated by slashes).
Args:
scoped: Whether scopes are allowed.
allow_periods_in_scope: Whether to allow period characters in the scope
names preceding the selector.
Returns:
The parsed selector (as a string).
Raises:
SyntaxError: If the scope or selector is malformatted. |
def begin_auth():
""" Request authentication token to sign """
repository = request.headers['repository']
if repository not in config['repositories']: return fail(no_such_repo_msg)
# ==
repository_path = config['repositories'][repository]['path']
conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db')); gc_tokens(conn)
# Issue a new token
auth_token = base64.b64encode(pysodium.randombytes(35)).decode('utf-8')
conn.execute("insert into tokens (expires, token, ip) values (?,?,?)",
(time.time() + 30, auth_token, request.environ['REMOTE_ADDR']))
conn.commit()
return success({'auth_token' : auth_token}) | Request authentication token to sign |
def create_cluster(dc_ref, cluster_name, cluster_spec):
'''
Creates a cluster in a datacenter.
dc_ref
The parent datacenter reference.
cluster_name
The cluster name.
cluster_spec
The cluster spec (vim.ClusterConfigSpecEx).
Defaults to None.
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
cluster_name, dc_name)
try:
dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg) | Creates a cluster in a datacenter.
dc_ref
The parent datacenter reference.
cluster_name
The cluster name.
cluster_spec
The cluster spec (vim.ClusterConfigSpecEx).
Defaults to None. |
def com_google_fonts_check_whitespace_widths(ttFont):
"""Whitespace and non-breaking space have the same width?"""
from fontbakery.utils import get_glyph_name
space_name = get_glyph_name(ttFont, 0x0020)
nbsp_name = get_glyph_name(ttFont, 0x00A0)
space_width = ttFont['hmtx'][space_name][0]
nbsp_width = ttFont['hmtx'][nbsp_name][0]
if space_width > 0 and space_width == nbsp_width:
yield PASS, "Whitespace and non-breaking space have the same width."
else:
yield FAIL, ("Whitespace and non-breaking space have differing width:"
" Whitespace ({}) is {} font units wide, non-breaking space"
" ({}) is {} font units wide. Both should be positive and the"
" same.").format(space_name, space_width, nbsp_name,
nbsp_width) | Whitespace and non-breaking space have the same width? |
def arp():
'''
Return the arp table from the minion
.. versionchanged:: 2015.8.0
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.arp
'''
ret = {}
out = __salt__['cmd.run']('arp -an')
for line in out.splitlines():
comps = line.split()
if len(comps) < 4:
continue
if __grains__['kernel'] == 'SunOS':
if ':' not in comps[-1]:
continue
ret[comps[-1]] = comps[1]
elif __grains__['kernel'] == 'OpenBSD':
if comps[0] == 'Host' or comps[1] == '(incomplete)':
continue
ret[comps[1]] = comps[0]
elif __grains__['kernel'] == 'AIX':
if comps[0] in ('bucket', 'There'):
continue
ret[comps[3]] = comps[1].strip('(').strip(')')
else:
ret[comps[3]] = comps[1].strip('(').strip(')')
return ret | Return the arp table from the minion
.. versionchanged:: 2015.8.0
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.arp |
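A standalone sketch of the default (Linux) branch of the parser above, fed canned `arp -an` output so it runs without shelling out; the sample lines are made up.
def parse_arp_an(output):
    ret = {}
    for line in output.splitlines():
        comps = line.split()
        if len(comps) < 4:
            continue
        # comps[3] is the MAC address, comps[1] is the IP wrapped in parentheses.
        ret[comps[3]] = comps[1].strip('(').strip(')')
    return ret

sample = ("? (192.168.1.1) at aa:bb:cc:dd:ee:ff [ether] on eth0\n"
          "? (192.168.1.20) at 11:22:33:44:55:66 [ether] on eth0")
print(parse_arp_an(sample))
# {'aa:bb:cc:dd:ee:ff': '192.168.1.1', '11:22:33:44:55:66': '192.168.1.20'}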
def get_receive(self, script_list):
"""Return a list of received events contained in script_list."""
events = defaultdict(set)
for script in script_list:
if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE:
event = script.blocks[0].args[0].lower()
events[event].add(script)
return events | Return a list of received events contained in script_list. |
def evaluate_objective(self):
"""
Evaluates the objective
"""
self.Y_new, cost_new = self.objective.evaluate(self.suggested_sample)
self.cost.update_cost_model(self.suggested_sample, cost_new)
self.Y = np.vstack((self.Y,self.Y_new)) | Evaluates the objective |
def stamp(name, backdate=None,
unique=None, keep_subdivisions=None, quick_print=None,
un=None, ks=None, qp=None):
"""
Mark the end of a timing interval.
Notes:
If keeping subdivisions, each subdivision currently awaiting
assignment to a stamp (i.e. ended since the last stamp in this level)
will be assigned to this one. Otherwise, all awaiting ones will be
discarded after aggregating their self times into the current timer.
If both long- and short-form are present, they are OR'ed together. If
neither are present, the current global default is used.
Backdating: record a stamp as if it happened at an earlier time.
Backdate time must be in the past but more recent than the latest stamp.
(This can be useful for parallel applications, wherein a sub-process
can return times of interest to the master process.)
Warning:
When backdating, awaiting subdivisions will be assigned as normal, with
no additional checks for validity.
Args:
name (any): The identifier for this interval, processed through str()
backdate (float, optional): time to use for stamp instead of current
unique (bool, optional): enforce uniqueness
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): print elapsed interval time
un (bool, optional): short-form for unique
ks (bool, optional): short-form for keep_subdivisions
qp (bool, optional): short-form for quick_print
Returns:
float: The current time.
Raises:
BackdateError: If the given backdate time is out of range.
PausedError: If the timer is paused.
StoppedError: If the timer is stopped.
TypeError: If the given backdate value is not type float.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot stamp stopped timer.")
if f.t.paused:
raise PausedError("Cannot stamp paused timer.")
if backdate is None:
t_stamp = t
else:
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.t.last_t:
raise BackdateError("Cannot backdate to time earlier than last stamp.")
t_stamp = backdate
elapsed = t_stamp - f.t.last_t
# Logic: default unless either arg used. if both args used, 'or' them.
unique = SET['UN'] if (unique is None and un is None) else bool(unique or un) # bool(None) becomes False
keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
_stamp(name, elapsed, unique, keep_subdivisions, quick_print)
tmp_self = timer() - t
f.t.self_cut += tmp_self
f.t.last_t = t_stamp + tmp_self
return t | Mark the end of a timing interval.
Notes:
If keeping subdivisions, each subdivision currently awaiting
assignment to a stamp (i.e. ended since the last stamp in this level)
will be assigned to this one. Otherwise, all awaiting ones will be
discarded after aggregating their self times into the current timer.
If both long- and short-form are present, they are OR'ed together. If
neither are present, the current global default is used.
Backdating: record a stamp as if it happened at an earlier time.
Backdate time must be in the past but more recent than the latest stamp.
(This can be useful for parallel applications, wherein a sub-process
can return times of interest to the master process.)
Warning:
When backdating, awaiting subdivisions will be assigned as normal, with
no additional checks for validity.
Args:
name (any): The identifier for this interval, processed through str()
backdate (float, optional): time to use for stamp instead of current
unique (bool, optional): enforce uniqueness
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): print elapsed interval time
un (bool, optional): short-form for unique
ks (bool, optional): short-form for keep_subdivisions
qp (bool, optional): short-form for quick_print
Returns:
float: The current time.
Raises:
BackdateError: If the given backdate time is out of range.
PausedError: If the timer is paused.
StoppedError: If the timer is stopped.
TypeError: If the given backdate value is not type float. |
def WriteLine(log: Any, consoleColor: int = -1, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:
"""
log: any type.
consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.
writeToFile: bool.
printToStdout: bool.
logFile: str, log file path.
"""
Logger.Write('{}\n'.format(log), consoleColor, writeToFile, printToStdout, logFile) | log: any type.
consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.
writeToFile: bool.
printToStdout: bool.
logFile: str, log file path. |
def fulltext_scan_ids(self, query_id=None, query_fc=None,
preserve_order=True, indexes=None):
'''Fulltext search for identifiers.
Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.
Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.
:param str fname:
The feature to search.
:param unicode query:
The query.
:rtype: Iterable of ``(score, content_id)``
'''
it = self._fulltext_scan(query_id, query_fc, feature_names=False,
preserve_order=preserve_order,
indexes=indexes)
for hit in it:
yield hit['_score'], did(hit['_id']) | Fulltext search for identifiers.
Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.
Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.
:param str fname:
The feature to search.
:param unicode query:
The query.
:rtype: Iterable of ``(score, content_id)`` |
def _escape_jid(jid):
'''
Do proper formatting of the jid
'''
jid = six.text_type(jid)
jid = re.sub(r"'*", "", jid)
return jid | Do proper formatting of the jid |
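A behavior check for the escaping above (a sketch): all single quotes are stripped, everything else passes through unchanged.
import re

def escape_jid_sketch(jid):
    jid = str(jid)
    return re.sub(r"'*", "", jid)

print(escape_jid_sketch("20180726083540640360"))    # unchanged
print(escape_jid_sketch("2018'; DROP TABLE jids"))  # quotes removed: 2018; DROP TABLE jids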
def fstab(config='/etc/fstab'):
'''
.. versionchanged:: 2016.3.2
List the contents of the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.fstab
'''
ret = {}
if not os.path.isfile(config):
return ret
with salt.utils.files.fopen(config) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
try:
if __grains__['kernel'] == 'SunOS':
# Note: comments are used in the default vfstab file!
if line[0] == '#':
continue
entry = _vfstab_entry.dict_from_line(
line)
else:
entry = _fstab_entry.dict_from_line(
line,
_fstab_entry.compatibility_keys)
entry['opts'] = entry['opts'].split(',')
while entry['name'] in ret:
entry['name'] += '_'
ret[entry.pop('name')] = entry
except _fstab_entry.ParseError:
pass
except _vfstab_entry.ParseError:
pass
return ret | .. versionchanged:: 2016.3.2
List the contents of the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.fstab |
def _tr_system(line_info):
"Translate lines escaped with: !"
cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
return '%sget_ipython().system(%r)' % (line_info.pre, cmd) | Translate lines escaped with: ! |
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise | Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist. |
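A quick demonstration of the ignore-ENOENT pattern above: deleting a path that is already gone is treated as success, while any other OSError still propagates. The pidfile path is hypothetical.
import errno
import os
import tempfile

def remove_if_exists(path):
    try:
        os.remove(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise

path = os.path.join(tempfile.gettempdir(), 'example.pid')   # hypothetical pidfile path
open(path, 'w').close()
remove_if_exists(path)   # removes the file
remove_if_exists(path)   # second call is a no-op instead of raising
print('done')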
def resizeToMinimum(self):
"""
Resizes the dock toolbar to the minimum sizes.
"""
offset = self.padding()
min_size = self.minimumPixmapSize()
if self.position() in (XDockToolbar.Position.East,
XDockToolbar.Position.West):
self.resize(min_size.width() + offset, self.height())
elif self.position() in (XDockToolbar.Position.North,
XDockToolbar.Position.South):
self.resize(self.width(), min_size.height() + offset) | Resizes the dock toolbar to the minimum sizes. |
def rpc(name, dest=None, **kwargs):
'''
Executes the given rpc. The returned data can be stored in a file
by specifying the destination path with dest as an argument
.. code-block:: yaml
get-interface-information:
junos:
- rpc
- dest: /home/user/rpc.log
- interface_name: lo0
Parameters:
Required
* cmd:
The rpc to be executed. (default = None)
Optional
* dest:
Destination file where the rpc output is stored. (default = None)
Note that the file will be stored on the proxy minion. To push the
files to the master, use the following Salt execution module: \
:py:func:`cp.push <salt.modules.cp.push>`
* format:
The format in which the rpc reply must be stored in file specified in the dest
(used only when dest is specified) (default = xml)
* kwargs: keyworded arguments taken by rpc call like-
* timeout:
Set NETCONF RPC timeout. Can be used for commands which
take a while to execute. (default= 30 seconds)
* filter:
Only to be used with 'get-config' rpc to get specific configuration.
* terse:
Amount of information you want.
* interface_name:
Name of the interface whose information you want.
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.rpc'](name, dest, **kwargs)
return ret | Executes the given rpc. The returned data can be stored in a file
by specifying the destination path with dest as an argument
.. code-block:: yaml
get-interface-information:
junos:
- rpc
- dest: /home/user/rpc.log
- interface_name: lo0
Parameters:
Required
* cmd:
The rpc to be executed. (default = None)
Optional
* dest:
Destination file where the rpc output is stored. (default = None)
Note that the file will be stored on the proxy minion. To push the
files to the master, use the following Salt execution module: \
:py:func:`cp.push <salt.modules.cp.push>`
* format:
The format in which the rpc reply must be stored in file specified in the dest
(used only when dest is specified) (default = xml)
* kwargs: keyworded arguments taken by rpc call like-
* timeout:
Set NETCONF RPC timeout. Can be used for commands which
take a while to execute. (default= 30 seconds)
* filter:
Only to be used with 'get-config' rpc to get specific configuration.
* terse:
Amount of information you want.
* interface_name:
Name of the interface whose information you want. |
def _AddUser(self, user):
"""Configure a Linux user account.
Args:
user: string, the name of the Linux user account to create.
Returns:
bool, True if user creation succeeded.
"""
self.logger.info('Creating a new user account for %s.', user)
command = self.useradd_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not create user %s. %s.', user, str(e))
return False
else:
self.logger.info('Created user account %s.', user)
return True | Configure a Linux user account.
Args:
user: string, the name of the Linux user account to create.
Returns:
bool, True if user creation succeeded. |
def to_new(self, data, perplexities=None, return_distances=False):
"""Compute the affinities of new samples to the initial samples.
This is necessary for embedding new data points into an existing
embedding.
Please see the :ref:`parameter-guide` for more information.
Parameters
----------
data: np.ndarray
The data points to be added to the existing embedding.
perplexities: List[float]
A list of perplexity values, which will be used in the multiscale
Gaussian kernel. Perplexity can be thought of as the continuous
:math:`k` number of nearest neighbors, for which t-SNE will attempt
to preserve distances.
return_distances: bool
If needed, the function can return the indices of the nearest
neighbors and their corresponding distances.
Returns
-------
P: array_like
An :math:`N \\times M` affinity matrix expressing interactions
between :math:`N` new data points the initial :math:`M` data
samples.
indices: np.ndarray
Returned if ``return_distances=True``. The indices of the :math:`k`
nearest neighbors in the existing embedding for every new data
point.
distances: np.ndarray
Returned if ``return_distances=True``. The distances to the
:math:`k` nearest neighbors in the existing embedding for every new
data point.
"""
perplexities = perplexities if perplexities is not None else self.perplexities
perplexities = self.check_perplexities(perplexities)
max_perplexity = np.max(perplexities)
k_neighbors = min(self.n_samples - 1, int(3 * max_perplexity))
neighbors, distances = self.knn_index.query(data, k_neighbors)
P = self._calculate_P(
neighbors,
distances,
perplexities,
symmetrize=False,
normalization="point-wise",
n_reference_samples=self.n_samples,
n_jobs=self.n_jobs,
)
if return_distances:
return P, neighbors, distances
return P | Compute the affinities of new samples to the initial samples.
This is necessary for embedding new data points into an existing
embedding.
Please see the :ref:`parameter-guide` for more information.
Parameters
----------
data: np.ndarray
The data points to be added to the existing embedding.
perplexities: List[float]
A list of perplexity values, which will be used in the multiscale
Gaussian kernel. Perplexity can be thought of as the continuous
:math:`k` number of nearest neighbors, for which t-SNE will attempt
to preserve distances.
return_distances: bool
If needed, the function can return the indices of the nearest
neighbors and their corresponding distances.
Returns
-------
P: array_like
An :math:`N \\times M` affinity matrix expressing interactions
between :math:`N` new data points the initial :math:`M` data
samples.
indices: np.ndarray
Returned if ``return_distances=True``. The indices of the :math:`k`
nearest neighbors in the existing embedding for every new data
point.
distances: np.ndarray
Returned if ``return_distances=True``. The distances to the
:math:`k` nearest neighbors in the existing embedding for every new
data point. |
def remove_labels(self, test):
"""
Remove labels from this cell.
The function or callable ``test`` is called for each label in
the cell. If its return value evaluates to ``True``, the
corresponding label is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a label should be removed.
The function is called with the label as the only argument.
Returns
-------
out : ``Cell``
This cell.
Examples
--------
Remove labels in layer 1:
>>> cell.remove_labels(lambda lbl: lbl.layer == 1)
"""
ii = 0
while ii < len(self.labels):
if test(self.labels[ii]):
self.labels.pop(ii)
else:
ii += 1
return self | Remove labels from this cell.
The function or callable ``test`` is called for each label in
the cell. If its return value evaluates to ``True``, the
corresponding label is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a label should be removed.
The function is called with the label as the only argument.
Returns
-------
out : ``Cell``
This cell.
Examples
--------
Remove labels in layer 1:
>>> cell.remove_labels(lambda lbl: lbl.layer == 1) |
def _match_offset_front_id_to_onset_front_id(onset_front_id, onset_fronts, offset_fronts, onsets, offsets):
"""
Find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the
given onset front.
The offset front which contains the most of such offsets is the match.
If there are no such offset fronts, return -1.
"""
# find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the onset front
# the offset front which contains the most of such offsets is the match
# get the onsets that make up front_id
onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)
# get the offsets that match the onsets in front_id
offset_idxs = [_lookup_offset_by_onset_idx(i, onsets, offsets) for i in onset_idxs]
# get all offset_fronts which contain at least one of these offsets
candidate_offset_front_ids = set([int(offset_fronts[f, i]) for f, i in offset_idxs])
# It is possible that offset_idxs contains offset indexes that correspond to offsets that did not
# get formed into a front - those will have a front ID of 0. Remove them.
candidate_offset_front_ids = [id for id in candidate_offset_front_ids if id != 0]
if candidate_offset_front_ids:
chosen_offset_front_id = _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offset_idxs)
else:
chosen_offset_front_id = _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts)
return chosen_offset_front_id | Find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the
given onset front.
The offset front which contains the most of such offsets is the match.
If there are no such offset fronts, return -1. |
def configure(cls, impl, **kwargs):
# type: (Any, **Any) -> None
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs | Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters. |
def coerce(self, value):
"""Convert text values into boolean values.
True values are (case insensitive): 'yes', 'true', '1'. False values
are (case insensitive): 'no', 'false', '0'.
Args:
value (str or bool): The value to coerce.
Raises:
TypeError: If the value is not a bool or string.
ValueError: If the value is not bool or an acceptable value.
Returns:
bool: The True/False value represented.
"""
if isinstance(value, bool):
return value
if not hasattr(value, 'lower'):
raise TypeError('Value is not bool or string.')
if value.lower() in ('yes', 'true', '1'):
return True
if value.lower() in ('no', 'false', '0'):
return False
raise ValueError('Could not coerce {0} to a bool.'.format(value)) | Convert text values into boolean values.
True values are (case insensitive): 'yes', 'true', '1'. False values
are (case insensitive): 'no', 'false', '0'.
Args:
value (str or bool): The value to coerce.
Raises:
TypeError: If the value is not a bool or string.
ValueError: If the value is not bool or an acceptable value.
Returns:
bool: The True/False value represented. |
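A behavior table for the coercion above, exercised through a tiny stand-in class so the sketch runs on its own (the real method lives on a config-option object).
class BoolOptionSketch(object):
    def coerce(self, value):
        if isinstance(value, bool):
            return value
        if not hasattr(value, 'lower'):
            raise TypeError('Value is not bool or string.')
        if value.lower() in ('yes', 'true', '1'):
            return True
        if value.lower() in ('no', 'false', '0'):
            return False
        raise ValueError('Could not coerce {0} to a bool.'.format(value))

opt = BoolOptionSketch()
print([opt.coerce(v) for v in ('YES', 'false', '1', True)])   # [True, False, True, True]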
def featureName(self):
"""
ID attribute from GFF3 or None if record doesn't have it.
Called "Name" rather than "Id" within GA4GH, as there is
no guarantee of either uniqueness or existence.
"""
featId = self.attributes.get("ID")
if featId is not None:
featId = featId[0]
return featId | ID attribute from GFF3 or None if record doesn't have it.
Called "Name" rather than "Id" within GA4GH, as there is
no guarantee of either uniqueness or existence. |
def _set_symlink_ownership(path, user, group, win_owner):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
if salt.utils.platform.is_windows():
try:
salt.utils.win_dacl.set_owner(path, win_owner)
except CommandExecutionError:
pass
else:
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group, win_owner) | Set the ownership of a symlink and return a boolean indicating
success/failure |
def DomainTokensGet(self, domain_id):
"""
T his method returns the list of tokens which are available for this domain.
Only domain managers can list domain tokens.
@param domain_id - ID of the domain for which to retrieve tokens
@return (bool) - Boolean indicating whether DomainTokensGet was successful
"""
if self.__SenseApiCall__('/domains/{0}/tokens.json'.format(domain_id), 'GET'):
return True
else:
self.__error__ = "api call unsuccessful"
return False | T his method returns the list of tokens which are available for this domain.
Only domain managers can list domain tokens.
@param domain_id - ID of the domain for which to retrieve tokens
@return (bool) - Boolean indicating whether DomainTokensGet was successful |
def get_log_entries_by_log(self, log_id):
"""Gets the list of log entries associated with a ``Log``.
arg: log_id (osid.id.Id): ``Id`` of a ``Log``
return: (osid.logging.LogEntryList) - list of related logEntry
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bin
mgr = self._get_provider_manager('LOGGING', local=True)
lookup_session = mgr.get_log_entry_lookup_session_for_log(log_id, proxy=self._proxy)
lookup_session.use_isolated_log_view()
return lookup_session.get_log_entries() | Gets the list of log entries associated with a ``Log``.
arg: log_id (osid.id.Id): ``Id`` of a ``Log``
return: (osid.logging.LogEntryList) - list of related logEntry
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |