sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (stringclasses: 1 value) |
---|---|---|
def periods(ts, phi=0.0):
"""For a single variable timeseries representing the phase of an oscillator,
measure the period of each successive oscillation.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries begins (or ends) exactly at phi, then the first
(or last) oscillation will be included.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): A single oscillation starts and ends at phase phi (by
default zero).
"""
ts = np.squeeze(ts)
if ts.ndim <= 1:
return np.diff(phase_crossings(ts, phi))
else:
return np.hstack([ts[...,i].periods(phi) for i in range(ts.shape[-1])]) | For a single variable timeseries representing the phase of an oscillator,
measure the period of each successive oscillation.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries begins (or ends) exactly at phi, then the first
(or last) oscillation will be included.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): A single oscillation starts and ends at phase phi (by
default zero). | entailment |
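A hedged usage sketch of periods(): it assumes numpy, the nsim Timeseries class shown later in this file, and nsim's phase_crossings() helper that the function body calls; the sample rate and oscillator frequency are illustrative.
import numpy as np
from nsim import Timeseries
tspan = np.linspace(0.0, 10.0, 10001)                 # 10 s sampled at 1 kHz
phase = np.angle(np.exp(1.0j * 2*np.pi*1.5*tspan))    # 1.5 Hz oscillator, phase wrapped to (-pi, pi]
ts = Timeseries(phase, tspan)
print(periods(ts))                                    # each period should be close to 1/1.5 ~= 0.667 s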
def circmean(ts, axis=2):
"""Circular mean phase"""
return np.exp(1.0j * ts).mean(axis=axis).angle() | Circular mean phase | entailment |
def order_param(ts, axis=2):
"""Order parameter of phase synchronization"""
return np.abs(np.exp(1.0j * ts).mean(axis=axis)) | Order parameter of phase synchronization | entailment |
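A quick hedged check of order_param(): because the body only uses numpy operations it also works on a plain ndarray of phases; identical phases across nodes give an order parameter near 1, independent phases give a value well below 1.
import numpy as np
rng = np.random.default_rng(0)
n, nvars, nodes = 1000, 1, 20
sync = np.tile(rng.uniform(-np.pi, np.pi, (n, nvars, 1)), (1, 1, nodes))   # same phase at every node
desync = rng.uniform(-np.pi, np.pi, (n, nvars, nodes))                     # independent phases
print(order_param(sync).mean())     # ~1.0 (fully synchronized)
print(order_param(desync).mean())   # well below 1 (incoherent)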
def cwtmorlet(points, width):
"""complex morlet wavelet function compatible with scipy.signal.cwt
Parameters: points: int
Number of points in `vector`.
width: scalar
Width parameter of wavelet.
Equals (sample rate / fundamental frequency of wavelet)
Returns: `vector`: complex-valued ndarray of shape (points,)
"""
omega = 5.0
s = points / (2.0 * omega * width)
return wavelets.morlet(points, omega, s, complete=True) | complex morlet wavelet function compatible with scipy.signal.cwt
Parameters: points: int
Number of points in `vector`.
width: scalar
Width parameter of wavelet.
Equals (sample rate / fundamental frequency of wavelet)
Returns: `vector`: complex-valued ndarray of shape (points,) | entailment |
def roughcwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length, width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
>>> length = min(3 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths)
"""
out_dtype = wavelet(widths[0], widths[0]).dtype
output = np.zeros([len(widths), len(data)], dtype=out_dtype)
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(3 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output | Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length, width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
>>> length = min(3 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths) | entailment |
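A hedged usage sketch of roughcwt() with the cwtmorlet wavelet above. It assumes `from scipy.signal import convolve, wavelets` at module level (which the function bodies rely on) and an older SciPy that still provides signal.wavelets.morlet; the test signal is illustrative.
import numpy as np
fs = 200.0                                    # sample rate in Hz
t = np.arange(0.0, 5.0, 1.0/fs)
sig = np.sin(2*np.pi*10.0*t)                  # 10 Hz test signal
freqs = np.array([5.0, 10.0, 20.0])
widths = np.round(fs / freqs).astype(int)     # width = sample rate / wavelet frequency, per cwtmorlet's docstring
coefs = roughcwt(sig, cwtmorlet, widths)      # complex array, shape (len(widths), len(sig))
power = np.abs(coefs)**2                      # power should be largest in the 10 Hz row (widths[1])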
def variability_fp(ts, freqs=None, ncycles=6, plot=True):
"""Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power.
"""
if freqs is None:
freqs = np.logspace(np.log10(1.0), np.log10(60.0), 50)
else:
freqs = np.array(freqs)
orig_ndim = ts.ndim
if ts.ndim == 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
n = len(ts)
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
fs = 1.0 / dt
dtype = ts.dtype
# Estimate time-resolved power spectra using continuous wavelet transform
coefs = ts.cwt(freqs, wavelet=cwtmorlet, plot=False)
# this is a huge array so try to do operations in place
powers = np.square(np.abs(coefs, coefs), coefs).real.astype(dtype,
copy=False)
del coefs
max_power = np.max(powers, axis=1)
total_power = np.sum(powers, axis=1, keepdims=True)
rel_power = np.divide(powers, total_power, powers)
del powers
centroid_freq = np.tensordot(freqs, rel_power, axes=(0, 1)) # shape (n, m)
del rel_power
# hw is half window size (in number of samples)
hw = np.int64(np.ceil(0.5 * ncycles * fs / centroid_freq)) # shape (n, m)
allchannels_variability = np.zeros((n, channels, 2), dtype) # output array
for i in range(channels):
logvar_centfreq = np.zeros(n, dtype)
logvar_maxpower = np.zeros(n, dtype)
for j in range(n):
# compute variance of two chosen signal properties over a
# window of 2*hw+1 samples centered on sample number j
wstart = j - hw[j, i]
wend = j + hw[j, i]
if wstart >= 0 and wend < n:
logvar_centfreq[j] = np.log(centroid_freq[wstart:wend+1, i].var())
logvar_maxpower[j] = np.log(max_power[wstart:wend+1, i].var())
else:
logvar_centfreq[j] = np.nan
logvar_maxpower[j] = np.nan
allchannels_variability[:, i, 0] = _rescale(logvar_centfreq)
allchannels_variability[:, i, 1] = _rescale(logvar_maxpower)
allchannels_variability = Timeseries(allchannels_variability,
ts.tspan, labels=ts.labels)
if plot:
_plot_variability(ts, allchannels_variability)
return allchannels_variability | Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power. | entailment |
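A hedged usage sketch (assumes `ts` is a multichannel nsim Timeseries with a constant timestep; in the full nsim library this function is also attached as the method ts.variability_fp()):
v = variability_fp(ts, freqs=None, ncycles=6, plot=False)   # Timeseries of shape (n, m, 2), values in [-1, 1]
centfreq_var = v[:, :, 0]    # variability from the variance of the centroid frequency
peak_var = v[:, :, 1]        # variability from the variance of the spectral peak height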
def _rescale(ar):
"""Shift and rescale array ar to the interval [-1, 1]"""
max = np.nanmax(ar)
min = np.nanmin(ar)
midpoint = (max + min) / 2.0
return 2.0 * (ar - midpoint) / (max - min) | Shift and rescale array ar to the interval [-1, 1] | entailment |
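For example, a quick check of the formula:
import numpy as np
_rescale(np.array([2.0, 3.0, 6.0]))   # -> array([-1. , -0.5,  1. ])  (midpoint 4.0 maps to 0)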
def _get_color_list():
"""Get cycle of colors in a way compatible with all matplotlib versions"""
if 'axes.prop_cycle' in plt.rcParams:
return [p['color'] for p in list(plt.rcParams['axes.prop_cycle'])]
else:
return plt.rcParams['axes.color_cycle'] | Get cycle of colors in a way compatible with all matplotlib versions | entailment |
def _plot_variability(ts, variability, threshold=None, epochs=None):
"""Plot the timeseries and variability. Optionally plot epochs."""
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
import matplotlib.pyplot as plt
if variability.ndim == 1:
variability = variability[:, np.newaxis, np.newaxis]
elif variability.ndim == 2:
variability = variability[:, np.newaxis, :]
vmeasures = variability.shape[2]
channels = ts.shape[1]
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
fig = plt.figure()
ylabelprops = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
for i in range(channels):
rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
0.8, 0.85/channels)
axprops = dict()
if channels > 10:
axprops['yticks'] = []
ax = fig.add_axes(rect, **axprops)
ax.plot(ts.tspan, ts[:, i])
if ts.labels[1] is None:
ax.set_ylabel(u'channel %d' % i, **ylabelprops)
else:
ax.set_ylabel(ts.labels[1][i], **ylabelprops)
plt.setp(ax.get_xticklabels(), visible=False)
if i == channels - 1:
plt.setp(ax.get_xticklabels(), visible=True)
ax.set_xlabel('time (s)')
ax2 = ax.twinx()
if vmeasures > 1:
mean_v = np.nanmean(variability[:, i, :], axis=1)
ax2.plot(ts.tspan, mean_v, color='g')
colors = _get_color_list()
for j in range(vmeasures):
ax2.plot(ts.tspan, variability[:, i, j], linestyle='dotted',
color=colors[(3 + j) % len(colors)])
if i == 0:
ax2.legend(['variability (mean)'] +
['variability %d' % j for j in range(vmeasures)],
loc='best')
else:
ax2.plot(ts.tspan, variability[:, i, 0])
ax2.legend(('variability',), loc='best')
if threshold is not None:
ax2.axhline(y=threshold, color='Gray', linestyle='dashed')
ax2.set_ylabel('variability')
ymin = np.nanmin(ts[:, i])
ymax = np.nanmax(ts[:, i])
tstart = ts.tspan[0]
if epochs:
# highlight epochs using rectangular patches
for e in epochs[i]:
t1 = tstart + (e[0] - 1) * dt
ax.add_patch(mpl.patches.Rectangle(
(t1, ymin), (e[1] - e[0])*dt, ymax - ymin, alpha=0.2,
color='green', ec='none'))
fig.axes[0].set_title(u'variability (threshold = %g)' % threshold)
fig.show() | Plot the timeseries and variability. Optionally plot epochs. | entailment |
def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if variability is None:
variability = ts.variability_fp(plot=False)
orig_ndim = ts.ndim
if ts.ndim == 1:
ts = ts[:, np.newaxis]
if variability.ndim == 1:
variability = variability[:, np.newaxis, np.newaxis]
elif variability.ndim == 2:
variability = variability[:, np.newaxis, :]
channels = ts.shape[1]
n = len(ts)
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
fs = 1.0 / dt
allchannels_epochs = []
for i in range(channels):
v = variability[:, i, :]
v = np.nanmean(v, axis=1) # mean of q different variability measures
# then smooth the variability with a low-pass filter
nonnan_ix = np.nonzero(~np.isnan(v))[0]
nonnans = slice(nonnan_ix.min(), nonnan_ix.max())
crit_freq = 1.0 # Hz
b, a = signal.butter(3, 2.0 * crit_freq / fs)
#v[nonnans] = signal.filtfilt(b, a, v[nonnans])
v[nonnan_ix] = signal.filtfilt(b, a, v[nonnan_ix])
# find all local minima of the variability not exceeding the threshold
m = v[1:-1]
l = v[0:-2]
r = v[2:]
minima = np.nonzero(~np.isnan(m) & ~np.isnan(l) & ~np.isnan(r) &
(m <= threshold) & (m-l < 0) & (r-m > 0))[0] + 1
if len(minima) == 0:
print(u'Channel %d: no epochs found using threshold %g' % (
i, threshold))
allchannels_epochs.append([])
else:
# Sort the list of minima by ascending variability
minima = minima[np.argsort(v[minima])]
epochs = []
for m in minima:
# Check this minimum is not inside an existing epoch
overlap = False
for e in epochs:
if m >= e[0] and m <= e[1]:
overlap = True
break
if not overlap:
# Get largest subthreshold interval surrounding the minimum
startix = m - 1
endix = m + 1
for startix in range(m - 1, 0, -1):
if np.isnan(v[startix]) or v[startix] > threshold:
startix += 1
break
for endix in range(m + 1, len(v), 1):
if np.isnan(v[endix]) or v[endix] > threshold:
break
if (endix - startix) * dt >= minlength:
epochs.append((startix, endix))
allchannels_epochs.append(epochs)
if plot:
_plot_variability(ts, variability, threshold, allchannels_epochs)
if orig_ndim == 1:
allchannels_epochs = allchannels_epochs[0]
return (variability, allchannels_epochs) | Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | entailment |
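A hedged usage sketch (assumes `ts` is a multichannel nsim Timeseries; when `variability` is None the function computes ts.variability_fp() itself, and the threshold value here is illustrative):
variability, channel_epochs = epochs(ts, threshold=0.5, minlength=2.0, plot=False)
for i, eps in enumerate(channel_epochs):
    for start, end in eps:
        print('channel %d: stationary epoch from t=%g s to t=%g s'
              % (i, ts.tspan[start], ts.tspan[end - 1]))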
def epochs_distributed(ts, variability=None, threshold=0.0, minlength=1.0,
plot=True):
"""Same as `epochs()`, but computes channels in parallel for speed.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
import distob
if ts.ndim == 1:
ts = ts[:, np.newaxis]
if variability is None:
dts = distob.scatter(ts, axis=1)
vepochs = distob.vectorize(epochs)
results = vepochs(dts, None, threshold, minlength, plot=False)
else:
def f(pair):
return epochs(pair[0], pair[1], threshold, minlength, plot=False)
allpairs = [(ts[:, i], variability[:, i]) for i in range(ts.shape[1])]
vf = distob.vectorize(f)
results = vf(allpairs)
vars, allchannels_epochs = zip(*results)
variability = distob.hstack(vars)
if plot:
_plot_variability(ts, variability, threshold, allchannels_epochs)
return (variability, allchannels_epochs) | Same as `epochs()`, but computes channels in parallel for speed.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | entailment |
def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
variability, allchannels_epochs = ts.epochs_distributed(
variability, threshold, minlength, plot=False)
orig_ndim = ts.ndim
if ts.ndim == 1:
ts = ts[:, np.newaxis]
allchannels_epochs = [allchannels_epochs]
variability = variability[:, np.newaxis]
channels = ts.shape[1]
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
all = sorted(starts + ends)
joint_epochs = []
in_joint_epoch = False
joint_start = 0.0
inside_count = 0
for bound in all:
inside_count += bound[1]
if not in_joint_epoch and 1.0*inside_count/channels >= proportion:
in_joint_epoch = True
joint_start = bound[0]
if in_joint_epoch and 1.0*inside_count/channels < proportion:
in_joint_epoch = False
joint_end = bound[0]
if (joint_end - joint_start)*dt >= minlength:
joint_epochs.append((joint_start, joint_end))
if plot:
joint_epochs_repeated = [joint_epochs] * channels
_plot_variability(ts, variability, threshold, joint_epochs_repeated)
return (variability, joint_epochs) | Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point) | entailment |
def plot(ts, title=None, show=True):
"""Plot a Timeseries
Args:
ts Timeseries
title str
show bool whether to display the figure or just return a figure object
"""
ts = _remove_pi_crossings(ts)
fig = plt.figure()
ylabelprops = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
if ts.ndim > 2: # multiple sim timeseries. collapse vars onto each subplot.
num_subplots = ts.shape[ts.ndim - 1]
if title is None:
title = u'time series at each node'
for i in range(num_subplots):
ax = fig.add_subplot(num_subplots, 1, i+1)
ax.plot(ts.tspan, ts[...,i])
if ts.labels[-1] is not None:
ax.set_ylabel(ts.labels[-1][i], **ylabelprops)
else:
ax.set_ylabel('node ' + str(i), **ylabelprops)
plt.setp(ax.get_xticklabels(), visible=False)
fig.axes[0].set_title(title)
plt.setp(fig.axes[num_subplots-1].get_xticklabels(), visible=True)
fig.axes[num_subplots-1].set_xlabel('time (s)')
else: # single sim timeseries. show each variable separately.
if ts.ndim == 1:
ts = ts.reshape((-1, 1))
num_ax = ts.shape[1]
if title is None:
title=u'time series'
axprops = dict()
if num_ax > 10:
axprops['yticks'] = []
colors = _get_color_list()
for i in range(num_ax):
rect = 0.1, 0.85*(num_ax - i - 1)/num_ax + 0.1, 0.8, 0.85/num_ax
ax = fig.add_axes(rect, **axprops)
ax.plot(ts.tspan, ts[...,i], color=colors[i % len(colors)])
plt.setp(ax.get_xticklabels(), visible=False)
if ts.labels[1] is not None:
ax.set_ylabel(ts.labels[1][i], **ylabelprops)
fig.axes[0].set_title(title)
plt.setp(fig.axes[num_ax-1].get_xticklabels(), visible=True)
fig.axes[num_ax-1].set_xlabel('time (s)')
if show:
fig.show()
return fig | Plot a Timeseries
Args:
ts Timeseries
title str
show bool whether to display the figure or just return a figure object | entailment |
def _remove_pi_crossings(ts):
"""For each variable in the Timeseries, checks whether it represents
a phase variable ranging from -pi to pi. If so, set all points where the
phase crosses pi to 'nan' so that spurious lines will not be plotted.
If ts does not need adjustment, then return ts.
Otherwise return a modified copy.
"""
orig_ts = ts
if ts.ndim == 1:
ts = ts[:, np.newaxis, np.newaxis]
elif ts.ndim == 2:
ts = ts[:, np.newaxis]
# Get the indices of those variables that have range of approx -pi to pi
tsmax = ts.max(axis=0)
tsmin = ts.min(axis=0)
phase_vars = np.transpose(np.nonzero((np.abs(tsmax - np.pi) < 0.01) &
(np.abs(tsmin + np.pi) < 0.01)))
if len(phase_vars) == 0:
return orig_ts
else:
ts = ts.copy()
for v in phase_vars:
ts1 = np.asarray(ts[:, v[0], v[1]]) # time series of single variable
ts1a = ts1[0:-1]
ts1b = ts1[1:]
p2 = np.pi/2
# Find time indices where phase crosses pi. Set those values to nan.
pc = np.nonzero((ts1a > p2) & (ts1b < -p2) |
(ts1a < -p2) & (ts1b > p2))[0] + 1
ts1[pc] = np.nan
ts[:, v[0], v[1]] = ts1
return ts | For each variable in the Timeseries, checks whether it represents
a phase variable ranging from -pi to pi. If so, set all points where the
phase crosses pi to 'nan' so that spurious lines will not be plotted.
If ts does not need adjustment, then return ts.
Otherwise return a modified copy. | entailment |
def timeseries_from_mat(filename, varname=None, fs=1.0):
"""load a multi-channel Timeseries from a MATLAB .mat file
Args:
filename (str): .mat file to load
varname (str): variable name. only needed if there is more than one
variable saved in the .mat file
fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)
Returns:
Timeseries
"""
import scipy.io as sio
if varname is None:
mat_dict = sio.loadmat(filename)
# ignore MATLAB metadata entries such as '__header__', '__version__', '__globals__'
varnames = [k for k in mat_dict if not k.startswith('__')]
if len(varnames) > 1:
raise ValueError('Must specify varname: file contains '
'more than one variable. ')
varname = varnames[0]
else:
mat_dict = sio.loadmat(filename, variable_names=(varname,))
array = mat_dict[varname]
return Timeseries(array, fs=fs) | load a multi-channel Timeseries from a MATLAB .mat file
Args:
filename (str): .mat file to load
varname (str): variable name. only needed if there is more than one
variable saved in the .mat file
fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)
Returns:
Timeseries | entailment |
def save_mat(ts, filename):
"""save a Timeseries to a MATLAB .mat file
Args:
ts (Timeseries): the timeseries to save
filename (str): .mat filename to save to
"""
import scipy.io as sio
tspan = ts.tspan
fs = (1.0*len(tspan) - 1) / (tspan[-1] - tspan[0])
mat_dict = {'data': np.asarray(ts),
'fs': fs,
'labels': ts.labels[1]}
sio.savemat(filename, mat_dict, do_compression=True)
return | save a Timeseries to a MATLAB .mat file
Args:
ts (Timeseries): the timeseries to save
filename (str): .mat filename to save to | entailment |
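A hedged round-trip sketch using the two .mat helpers above (the filename and variable name are illustrative):
ts = timeseries_from_mat('recording.mat', varname='eeg', fs=250.0)
save_mat(ts, 'recording_copy.mat')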
def timeseries_from_file(filename):
"""Load a multi-channel Timeseries from any file type supported by `biosig`
Supported file formats include EDF/EDF+, BDF/BDF+, EEG, CNT and GDF.
Full list is here: http://pub.ist.ac.at/~schloegl/biosig/TESTED
For EDF, EDF+, BDF and BDF+ files, we will use python-edf
if it is installed, otherwise will fall back to python-biosig.
Args:
filename
Returns:
Timeseries
"""
if not path.isfile(filename):
raise Error("file not found: '%s'" % filename)
is_edf_bdf = (filename[-4:].lower() in ['.edf', '.bdf'])
if is_edf_bdf:
try:
import edflib
return _load_edflib(filename)
except ImportError:
print('python-edf not installed. trying python-biosig instead...')
try:
import biosig
return _load_biosig(filename)
except ImportError:
message = (
"""To load timeseries from file, ensure python-biosig is installed
e.g. on Ubuntu or Debian type `apt-get install python-biosig`
or get it from http://biosig.sf.net/download.html""")
if is_edf_bdf:
message += """\n(For EDF/BDF files, can instead install python-edf:
https://bitbucket.org/cleemesser/python-edf/ )"""
raise Error(message) | Load a multi-channel Timeseries from any file type supported by `biosig`
Supported file formats include EDF/EDF+, BDF/BDF+, EEG, CNT and GDF.
Full list is here: http://pub.ist.ac.at/~schloegl/biosig/TESTED
For EDF, EDF+, BDF and BDF+ files, we will use python-edf
if it is installed, otherwise will fall back to python-biosig.
Args:
filename
Returns:
Timeseries | entailment |
def _load_edflib(filename):
"""load a multi-channel Timeseries from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
Timeseries
"""
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
if np.ptp(e.get_samples_per_signal()) != 0:
raise Error('channels have differing numbers of samples')
if np.ptp(e.get_signal_freqs()) != 0:
raise Error('channels have differing sample rates')
n = e.samples_in_file(0)
m = e.signals_in_file
channelnames = e.get_signal_text_labels()
dt = 1.0/e.samplefrequency(0)
# EDF files hold <=16 bits of information for each sample. Representing as
# double precision (64bit) is unnecessary use of memory. use 32 bit float:
ar = np.zeros((n, m), dtype=np.float32)
# edflib requires input buffer of float64s
buf = np.zeros((n,), dtype=np.float64)
for i in range(m):
e.read_phys_signal(i, 0, n, buf)
ar[:,i] = buf
tspan = np.arange(0, (n - 1 + 0.5) * dt, dt, dtype=np.float32)
return Timeseries(ar, tspan, labels=[None, channelnames]) | load a multi-channel Timeseries from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
Timeseries | entailment |
def annotations_from_file(filename):
"""Get a list of event annotations from an EDF (European Data Format file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
list: annotation events, each in the form [start_time, duration, text]
"""
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
return e.read_annotations() | Get a list of event annotations from an EDF (European Data Format file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
list: annotation events, each in the form [start_time, duration, text] | entailment |
def _ufunc_wrap(out_arr, ufunc, method, i, inputs, **kwargs):
"""After using the superclass __numpy_ufunc__ to route ufunc computations
on the array data, convert any resulting ndarray, RemoteArray and DistArray
instances into Timeseries, RemoteTimeseries and DistTimeseries instances
if appropriate"""
# Assigns tspan/labels to an axis only if inputs do not disagree on them.
shape = out_arr.shape
ndim = out_arr.ndim
if ndim == 0 or shape[0] == 0:
# not a timeseries
return out_arr
candidates = [a.tspan for a in inputs if (hasattr(a, 'tspan') and
a.shape[0] == shape[0])]
# Expensive to validate all tspans are the same. check start and end t
starts = [tspan[0] for tspan in candidates]
ends = [tspan[-1] for tspan in candidates]
if len(set(starts)) != 1 or len(set(ends)) != 1:
# inputs cannot agree on tspan
return out_arr
else:
new_tspan = candidates[0]
new_labels = [None]
for i in range(1, ndim):
candidates = [a.labels[i] for a in inputs if (hasattr(a, 'labels') and
a.shape[i] == shape[i] and a.labels[i] is not None)]
if len(candidates) == 1:
new_labels.append(candidates[0])
elif (len(candidates) > 1 and all(labs[j] == candidates[0][j] for
labs in candidates[1:] for j in range(shape[i]))):
new_labels.append(candidates[0])
else:
new_labels.append(None)
if isinstance(out_arr, np.ndarray):
return Timeseries(out_arr, new_tspan, new_labels)
elif isinstance(out_arr, distob.RemoteArray):
return _rts_from_ra(out_arr, new_tspan, new_labels)
elif (isinstance(out_arr, distob.DistArray) and
all(isinstance(ra, RemoteTimeseries) for ra in out_arr._subarrays)):
return _dts_from_da(out_arr, new_tspan, new_labels)
else:
return out_arr | After using the superclass __numpy_ufunc__ to route ufunc computations
on the array data, convert any resulting ndarray, RemoteArray and DistArray
instances into Timeseries, RemoteTimeseries and DistTimeseries instances
if appropriate | entailment |
def _rts_from_ra(ra, tspan, labels, block=True):
"""construct a RemoteTimeseries from a RemoteArray"""
def _convert(a, tspan, labels):
from nsim import Timeseries
return Timeseries(a, tspan, labels)
return distob.call(
_convert, ra, tspan, labels, prefer_local=False, block=block) | construct a RemoteTimeseries from a RemoteArray | entailment |
def _dts_from_da(da, tspan, labels):
"""construct a DistTimeseries from a DistArray"""
sublabels = labels[:]
new_subarrays = []
for i, ra in enumerate(da._subarrays):
if isinstance(ra, RemoteTimeseries):
new_subarrays.append(ra)
else:
if labels[da._distaxis]:
sublabels[da._distaxis] = labels[da._distaxis][
da._si[i]:da._si[i+1]]
new_subarrays.append(_rts_from_ra(ra, tspan, sublabels, False))
new_subarrays = [distob.convert_result(ar) for ar in new_subarrays]
da._subarrays = new_subarrays
da.__class__ = DistTimeseries
da.tspan = tspan
da.labels = labels
da.t = _Timeslice(da)
return da | construct a DistTimeseries from a DistArray | entailment |
def newsim(f, G, y0, name='NewModel', modelType=ItoModel, T=60.0, dt=0.005, repeat=1, identical=True):
"""Make a simulation of the system defined by functions f and G.
dy = f(y,t)dt + G(y,t).dW with initial condition y0
This helper function is for convenience, making it easy to define
one-off simulations interactively in ipython.
Args:
f: callable(y, t) (defined in global scope) returning (n,) array
Vector-valued function to define the deterministic part of the system
G: callable(y, t) (defined in global scope) returning (n,m) array
Optional matrix-valued function to define noise coefficients of an Ito
SDE system.
y0 (array): Initial condition
name (str): Optional class name for the new model
modelType (type): The type of model to simulate. Must be a subclass of
nsim.Model, for example nsim.ODEModel, nsim.ItoModel or
nsim.StratonovichModel. The default is nsim.ItoModel.
T: Total length of time to simulate, in seconds.
dt: Timestep for numerical integration.
repeat (int, optional)
identical (bool, optional)
Returns:
Simulation
Raises:
SimValueError, SimTypeError
"""
NewModel = newmodel(f, G, y0, name, modelType)
if repeat == 1:
return Simulation(NewModel(), T, dt)
else:
return RepeatedSim(NewModel, T, dt, repeat, identical) | Make a simulation of the system defined by functions f and G.
dy = f(y,t)dt + G(y,t).dW with initial condition y0
This helper function is for convenience, making it easy to define
one-off simulations interactively in ipython.
Args:
f: callable(y, t) (defined in global scope) returning (n,) array
Vector-valued function to define the deterministic part of the system
G: callable(y, t) (defined in global scope) returning (n,m) array
Optional matrix-valued function to define noise coefficients of an Ito
SDE system.
y0 (array): Initial condition
name (str): Optional class name for the new model
modelType (type): The type of model to simulate. Must be a subclass of
nsim.Model, for example nsim.ODEModel, nsim.ItoModel or
nsim.StratonovichModel. The default is nsim.ItoModel.
T: Total length of time to simulate, in seconds.
dt: Timestep for numerical integration.
repeat (int, optional)
identical (bool, optional)
Returns:
Simulation
Raises:
SimValueError, SimTypeError | entailment |
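A hedged one-off example, simulating an Ornstein-Uhlenbeck process with newsim(); the parameter values are illustrative, and f and G must be defined at global scope as the docstring requires.
import numpy as np
theta, mu, sigma = 1.0, 0.0, 0.3
def f(y, t):
    return theta * (mu - y)            # deterministic drift, shape (1,)
def G(y, t):
    return np.array([[sigma]])         # constant noise coefficient, shape (1, 1)
y0 = np.array([1.0])
sim = newsim(f, G, y0, name='OU', T=60.0, dt=0.005)
plot(sim.output, title='OU sample path')   # plot() is the helper defined earlier in this module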
def newmodel(f, G, y0, name='NewModel', modelType=ItoModel):
"""Use the functions f and G to define a new Model class for simulations.
It will take functions f and G from global scope and make a new Model class
out of them. It will automatically gather any globals used in the definition
of f and G and turn them into attributes of the new Model.
Args:
f: callable(y, t) (defined in global scope) returning (n,) array
Scalar or vector-valued function to define the deterministic part
G: callable(y, t) (defined in global scope) returning (n,m) array
Optional scalar or matrix-valued function to define noise coefficients
of a stochastic system. This should be ``None`` for an ODE system.
y0 (Number or array): Initial condition
name (str): Optional class name for the new model
modelType (type): The type of model to simulate. Must be a subclass of
nsim.Model, for example nsim.ODEModel, nsim.ItoModel or
nsim.StratonovichModel. The default is nsim.ItoModel.
Returns:
new class (subclass of Model)
Raises:
SimValueError, SimTypeError
"""
if not issubclass(modelType, Model):
raise SimTypeError('modelType must be a subclass of nsim.Model')
if not callable(f) or (G is not None and not callable(G)):
raise SimTypeError('f and G must be functions of y and t.')
if G is not None and f.__globals__ is not G.__globals__:
raise SimValueError('f and G must be defined in the same place')
# TODO: validate that f and G are defined at global scope.
# TODO: Handle nonlocals used in f,G so that we can lift this restriction.
if modelType is ODEModel and G is not None and not np.all(G == 0.0):
raise SimValueError('For an ODEModel, noise matrix G should be None')
if G is None or modelType is ODEModel:
newclass = type(name, (ODEModel,), dict())
setattr(newclass, 'f', staticmethod(__clone_function(f, 'f')))
else:
newclass = type(name, (modelType,), dict())
setattr(newclass, 'f', staticmethod(__clone_function(f, 'f')))
setattr(newclass, 'G', staticmethod(__clone_function(G, 'G')))
setattr(newclass, 'y0', copy.deepcopy(y0))
# For any global that is used by the functions f or G, create a
# corresponding attribute in our new class.
globals_used = [x for x in f.__globals__ if (x in f.__code__.co_names or
G is not None and x in G.__code__.co_names)]
for x in globals_used:
if G is None:
setattr(newclass, x, __AccessDict(x, newclass.f.__globals__))
else:
setattr(newclass, x, __AccessDicts(x, newclass.f.__globals__,
newclass.G.__globals__))
# Put the new class into namespace __main__ (to cause dill to pickle it)
newclass.__module__ = '__main__'
import __main__
__main__.__dict__[name] = newclass
return newclass | Use the functions f and G to define a new Model class for simulations.
It will take functions f and G from global scope and make a new Model class
out of them. It will automatically gather any globals used in the definition
of f and G and turn them into attributes of the new Model.
Args:
f: callable(y, t) (defined in global scope) returning (n,) array
Scalar or vector-valued function to define the deterministic part
G: callable(y, t) (defined in global scope) returning (n,m) array
Optional scalar or matrix-valued function to define noise coefficients
of a stochastic system. This should be ``None`` for an ODE system.
y0 (Number or array): Initial condition
name (str): Optional class name for the new model
modelType (type): The type of model to simulate. Must be a subclass of
nsim.Model, for example nsim.ODEModel, nsim.ItoModel or
nsim.StratonovichModel. The default is nsim.ItoModel.
Returns:
new class (subclass of Model)
Raises:
SimValueError, SimTypeError | entailment |
def __clone_function(f, name=None):
"""Make a new version of a function that has its own independent copy
of any globals that it uses directly, and has its own name.
All other attributes are assigned from the original function.
Args:
f: the function to clone
name (str): the name for the new function (if None, keep the same name)
Returns:
A copy of the function f, having its own copy of any globals used
Raises:
SimValueError
"""
if not isinstance(f, types.FunctionType):
raise SimTypeError('Given parameter is not a function.')
if name is None:
name = f.__name__
newglobals = f.__globals__.copy()
globals_used = [x for x in f.__globals__ if x in f.__code__.co_names]
for x in globals_used:
gv = f.__globals__[x]
if isinstance(gv, types.FunctionType):
# Recursively clone any global functions used by this function.
newglobals[x] = __clone_function(gv)
elif isinstance(gv, types.ModuleType):
newglobals[x] = gv
else:
# If it is something else, deep copy it.
newglobals[x] = copy.deepcopy(gv)
newfunc = types.FunctionType(
f.__code__, newglobals, name, f.__defaults__, f.__closure__)
return newfunc | Make a new version of a function that has its own independent copy
of any globals that it uses directly, and has its own name.
All other attributes are assigned from the original function.
Args:
f: the function to clone
name (str): the name for the new function (if None, keep the same name)
Returns:
A copy of the function f, having its own copy of any globals used
Raises:
SimValueError | entailment |
def expand_dims(self, axis):
"""Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
"""
if axis <= self._distaxis:
subaxis = axis
new_distaxis = self._distaxis + 1
else:
subaxis = axis - 1
new_distaxis = self._distaxis
new_subts = [rts.expand_dims(subaxis) for rts in self._subarrays]
if axis == 0:
# prepended an axis: no longer a Timeseries
return distob.DistArray(new_subts, new_distaxis)
else:
axislabels = self.labels[self._distaxis]
return DistTimeseries(new_subts, new_distaxis, axislabels) | Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted. | entailment |
def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
"""
da = distob.vectorize(np.absolute)(self)
return _dts_from_da(da, self.tspan, self.labels) | Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
def angle(self, deg=False):
"""Return the angle of a complex Timeseries
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
"""
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
da = distob.vectorize(np.angle)(self, deg)
return _dts_from_da(da, self.tspan, self.labels) | Return the angle of a complex Timeseries
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64. | entailment |
def coupling(self, source_y, target_y, weight):
"""How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
target system.
"""
return np.ones_like(target_y)*np.mean(source_y)*weight | How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
target system. | entailment |
def f(self, y, t):
"""Deterministic term f of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(or for an ODE network system without noise, dy/dt = f(y, t))
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
f (array of shape (d,)): Defines the deterministic term of the
complete network system
"""
coupling = self.coupling_function[0]
res = np.empty_like(self.y0)
for j, m in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j+1])
target_y = y[slicej] # target node state
res[slicej] = m.f(target_y, t) # deterministic part of submodel j
# get indices of all source nodes that provide input to node j:
sources = np.nonzero(self.network[:,j])[0]
for i in sources:
weight = self.network[i, j]
source_y = y[slice(self._si[i], self._si[i+1])] # source state
res[slicej] += coupling(source_y, target_y, weight)
return res | Deterministic term f of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(or for an ODE network system without noise, dy/dt = f(y, t))
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
f (array of shape (d,)): Defines the deterministic term of the
complete network system | entailment |
def G(self, y, t):
"""Noise coefficient matrix G of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(for an ODE network system without noise this function is not used)
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
G (array of shape (d, m)): where m is the number of independent
Wiener processes driving the complete network system. The noise
coefficient matrix G defines the stochastic term of the system.
"""
if self._independent_noise:
# then G matrix consists of submodel Gs diagonally concatenated:
res = np.zeros((self.dimension, self.nnoises))
offset = 0
for j, m in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j+1])
ix = (slicej, slice(offset, offset + self._nsubnoises[j]))
res[ix] = m.G(y[slicej], t) # submodel noise coefficient matrix
offset += self._nsubnoises[j]
else:
# identical driving: G consists of submodel Gs stacked vertically
res = np.empty((self.dimension, self.nnoises))
for j, m in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j+1])
ix = (slicej, slice(None))
res[ix] = m.G(y[slicej], t) # submodel noise coefficient matrix
return res | Noise coefficient matrix G of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(for an ODE network system without noise this function is not used)
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
G (array of shape (d, m)): where m is the number of independent
Wiener processes driving the complete network system. The noise
coefficient matrix G defines the stochastic term of the system. | entailment |
def _scalar_to_vector(self, m):
"""Allow submodels with scalar equations. Convert to 1D vector systems.
Args:
m (Model)
"""
if not isinstance(m.y0, numbers.Number):
return m
else:
m = copy.deepcopy(m)
t0 = 0.0
if isinstance(m.y0, numbers.Integral):
numtype = np.float64
else:
numtype = type(m.y0)
y0_orig = m.y0
m.y0 = np.array([m.y0], dtype=numtype)
def make_vector_fn(fn):
def newfn(y, t):
return np.array([fn(y[0], t)], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_matrix_fn(fn):
def newfn(y, t):
return np.array([[fn(y[0], t)]], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_coupling_fn(fn):
def newfn(source_y, target_y, weight):
return np.array([fn(source_y[0], target_y[0], weight)])
newfn.__name__ = fn.__name__
return newfn
if isinstance(m.f(y0_orig, t0), numbers.Number):
m.f = make_vector_fn(m.f)
if hasattr(m, 'G') and isinstance(m.G(y0_orig,t0), numbers.Number):
m.G = make_matrix_fn(m.G)
if (hasattr(m, 'coupling') and
isinstance(m.coupling(y0_orig, y0_orig, 0.5),
numbers.Number)):
m.coupling = make_coupling_fn(m.coupling)
return m | Allow submodels with scalar equations. Convert to 1D vector systems.
Args:
m (Model) | entailment |
def _reshape_timeseries(self, ts):
"""Introduce a new axis 2 that ranges across nodes of the network"""
if np.count_nonzero(np.diff(self._sublengths)) == 0:
# then all submodels have the same dimension, so can reshape array
# in place without copying data:
subdim = self.dimension // self._n
shp = list(ts.shape)
shp[1] = self._n
shp.insert(2, subdim)
ts = ts.reshape(tuple(shp)).swapaxes(1, 2)
# label variables only if all sub-models agree on the labels:
all_var_labels = [m.labels for m in self.submodels]
var_labels = all_var_labels[0]
if all(v == var_labels for v in all_var_labels[1:]):
ts.labels[1] = var_labels
ts.labels[2] = self._node_labels()
return ts
else:
# will pad with zeros for submodels with less variables
subdim = max(self._sublengths)
shp = list(ts.shape)
shp[1] = subdim
shp.insert(2, self._n)
ar = np.zeros(shp)
labels = ts.labels
labels.insert(2, self._node_labels())
for k in range(self._n):
sl = slice(self._si[k], self._si[k+1])
ar[:, :(self._si[k+1] - self._si[k]), k, ...] = ts[:, sl, ...]
return Timeseries(ar, ts.tspan, labels) | Introduce a new axis 2 that ranges across nodes of the network | entailment |
def _reshape_output(self, ts):
"""Introduce a new axis 2 that ranges across nodes of the network"""
subodim = len(self.submodels[0].output_vars)
shp = list(ts.shape)
shp[1] = subodim
shp.insert(2, self._n)
ts = ts.reshape(tuple(shp))
ts.labels[2] = self._node_labels()
return ts | Introduce a new axis 2 that ranges across nodes of the network | entailment |
def timeseries(self):
"""Simulated time series"""
if self._timeseries is None:
self.compute()
if isinstance(self.system, NetworkModel):
return self.system._reshape_timeseries(self._timeseries)
else:
return self._timeseries | Simulated time series | entailment |
def output(self):
"""Simulated model output"""
if self._timeseries is None:
self.compute()
output = self._timeseries[:, self.system.output_vars]
if isinstance(self.system, NetworkModel):
return self.system._reshape_output(output)
else:
return output | Simulated model output | entailment |
def output(self):
"""Rank 3 array representing output time series. Axis 0 is time,
axis 1 ranges across output variables of a single simulation,
axis 2 ranges across different simulation instances."""
subts = [s.output for s in self.sims]
sub_ndim = subts[0].ndim
if sub_ndim == 1:
subts = [distob.expand_dims(ts, 1) for ts in subts]
sub_ndim += 1
nodeaxis = sub_ndim
subts = [distob.expand_dims(ts, nodeaxis) for ts in subts]
ts = subts[0].concatenate(subts[1:], axis=nodeaxis)
ts.labels[nodeaxis] = self._node_labels()
return ts | Rank 3 array representing output time series. Axis 0 is time,
axis 1 ranges across output variables of a single simulation,
axis 2 ranges across different simulation instances. | entailment |
def _tosub(self, ix):
"""Given an integer index ix into the list of sims, returns the pair
(s, m) where s is the relevant subsim and m is the subindex into s.
So self[ix] == self._subsims[s][m]
"""
N = self._n
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (ix, N))
if ix < 0:
ix += N
for s in range(0, self._n):
if self._si[s + 1] - 1 >= ix:
break
m = ix - self._si[s]
return s, m | Given an integer index ix into the list of sims, returns the pair
(s, m) where s is the relevant subsim and m is the subindex into s.
So self[ix] == self._subsims[s][m] | entailment |
def _tosubs(self, ixlist):
"""Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
list of lists of subindices m (one list for each subsim in ss).
"""
n = len(ixlist)
N = self._n
ss = []
ms = []
if n == 0:
return ss, ms
j = 0 # the position in ixlist currently being processed
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (ix, N))
if ix < 0:
ix += N
while j < n:
for s in range(0, self._n):
low = self._si[s]
high = self._si[s + 1]
if ix >= low and ix < high:
ss.append(s)
msj = [ix - low]
j += 1
while j < n:
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (
ix, N))
if ix < 0:
ix += N
if ix < low or ix >= high:
break
msj.append(ix - low)
j += 1
ms.append(msj)
if ix < low:
break
return ss, ms | Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
list of lists of subindices m (one list for each subsim in ss). | entailment |
def output(self):
"""Rank 3 array representing output time series. Axis 0 is time,
axis 1 ranges across output variables of a single simulation, axis 2
ranges across different simulation instances."""
subts = [rms.output for rms in self._subsims]
distaxis = subts[0].ndim - 1
return DistTimeseries(subts, distaxis, self._node_labels()) | Rank 3 array representing output time series. Axis 0 is time,
axis 1 ranges across output variables of a single simulation, axis 2
ranges across different simulation instances. | entailment |
def crossing_times(ts, c=0.0, d=0.0):
"""For a single variable timeseries, find the times at which the
value crosses ``c`` from above or below. Can optionally set a non-zero
``d`` to impose the condition that the value must wander at least ``d``
units away from ``c`` between crossings.
If the timeseries begins (or ends) exactly at ``c``, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last excursions are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Args:
ts: Timeseries (single variable)
c (float): Critical value at which to report crossings.
d (float): Optional min distance from c to be attained between crossings.
Returns:
array of float
"""
#TODO support multivariate time series
ts = ts.squeeze()
if ts.ndim != 1:
raise ValueError('Currently can only use on single variable timeseries')
# Translate to put the critical value at zero:
ts = ts - c
tsa = ts[0:-1]
tsb = ts[1:]
# Time indices where phase crosses or reaches zero from below or above
zc = np.nonzero((tsa < 0) & (tsb >= 0) | (tsa > 0) & (tsb <= 0))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc-1]
vb = ts[zc]
ct = (np.abs(vb)*ts.tspan[zc-1] +
np.abs(va)*ts.tspan[zc]) / np.abs(vb - va) # denominator always !=0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
zc = np.r_[np.array([0]), zc]
ct = np.r_[np.array([ts.tspan[0]]), ct]
if d == 0.0 or ct.shape[0] == 0:
return ct
# Time indices where value crosses c+d or c-d:
dc = np.nonzero((tsa < d) & (tsb >= d) | (tsa > -d) & (tsb <= -d))[0] + 1
# Select those zero-crossings separated by at least one d-crossing
splice = np.searchsorted(dc, zc)
which_zc = np.r_[np.array([0]), np.nonzero(splice[0:-1] - splice[1:])[0] +1]
return ct[which_zc] | For a single variable timeseries, find the times at which the
value crosses ``c`` from above or below. Can optionally set a non-zero
``d`` to impose the condition that the value must wander at least ``d``
units away from ``c`` between crossings.
If the timeseries begins (or ends) exactly at ``c``, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last excursions are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Args:
ts: Timeseries (single variable)
c (float): Critical value at which to report crossings.
d (float): Optional min distance from c to be attained between crossings.
Returns:
array of float | entailment |
def first_return_times(ts, c=None, d=0.0):
"""For a single variable time series, first wait until the time series
attains the value c for the first time. Then record the time intervals
between successive returns to c. If c is not given, the default is the mean
of the time series.
Args:
ts: Timeseries (single variable)
c (float): Optional target value (default is the mean of the time series)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time)
"""
ts = np.squeeze(ts)
if c is None:
c = ts.mean()
if ts.ndim <= 1:
return np.diff(ts.crossing_times(c, d))
else:
return np.hstack(
[ts[..., i].first_return_times(c, d) for i in range(ts.shape[-1])]) | For a single variable time series, first wait until the time series
attains the value c for the first time. Then record the time intervals
between successive returns to c. If c is not given, the default is the mean
of the time series.
Args:
ts: Timeseries (single variable)
c (float): Optional target value (default is the mean of the time series)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time) | entailment |
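A hedged sketch exercising crossing_times() and first_return_times() on a 1 Hz sine wave; it assumes numpy and the nsim Timeseries class, and that these functions are also attached as Timeseries methods (first_return_times() relies on ts.crossing_times() internally).
import numpy as np
from nsim import Timeseries
tspan = np.linspace(0.0, 5.0, 5001)
ts = Timeseries(np.sin(2*np.pi*1.0*tspan), tspan)
print(crossing_times(ts, c=0.0))        # ~0.0, 0.5, 1.0, ... (two zero crossings per cycle)
print(first_return_times(ts, c=0.0))    # intervals between crossings, each ~0.5 s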
def autocorrelation(ts, normalized=False, unbiased=False):
"""
Returns the discrete, linear convolution of a time series with itself,
optionally using unbiased normalization.
N.B. Autocorrelation estimates are necessarily inaccurate for longer lags,
    as there are fewer pairs of points to convolve separated by that lag.
Therefore best to throw out the results except for shorter lags, e.g.
keep lags from tau=0 up to one quarter of the total time series length.
Args:
normalized (boolean): If True, the time series will first be normalized
to a mean of 0 and variance of 1. This gives autocorrelation 1 at
zero lag.
unbiased (boolean): If True, the result at each lag m will be scaled by
1/(N-m). This gives an unbiased estimation of the autocorrelation of a
stationary process from a finite length sample.
Ref: S. J. Orfanidis (1996) "Optimum Signal Processing", 2nd Ed.
"""
ts = np.squeeze(ts)
if ts.ndim <= 1:
if normalized:
ts = (ts - ts.mean())/ts.std()
N = ts.shape[0]
ar = np.asarray(ts)
acf = np.correlate(ar, ar, mode='full')
        outlen = (acf.shape[0] + 1) // 2  # floor division keeps outlen an int for indexing
acf = acf[(outlen - 1):]
if unbiased:
factor = np.array([1.0/(N - m) for m in range(0, outlen)])
acf = acf * factor
dt = (ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1.0)
lags = np.arange(outlen)*dt
return Timeseries(acf, tspan=lags, labels=ts.labels)
else:
# recursively handle arrays of dimension > 1
lastaxis = ts.ndim - 1
m = ts.shape[lastaxis]
acfs = [ts[...,i].autocorrelation(normalized, unbiased)[...,np.newaxis]
for i in range(m)]
res = distob.concatenate(acfs, axis=lastaxis)
res.labels[lastaxis] = ts.labels[lastaxis]
return res | Returns the discrete, linear convolution of a time series with itself,
optionally using unbiased normalization.
N.B. Autocorrelation estimates are necessarily inaccurate for longer lags,
    as there are fewer pairs of points to convolve separated by that lag.
Therefore best to throw out the results except for shorter lags, e.g.
keep lags from tau=0 up to one quarter of the total time series length.
Args:
normalized (boolean): If True, the time series will first be normalized
to a mean of 0 and variance of 1. This gives autocorrelation 1 at
zero lag.
unbiased (boolean): If True, the result at each lag m will be scaled by
1/(N-m). This gives an unbiased estimation of the autocorrelation of a
stationary process from a finite length sample.
Ref: S. J. Orfanidis (1996) "Optimum Signal Processing", 2nd Ed. | entailment |
def fan_speed(self, value):
"""Verifies the value is between 1 and 9 inclusively."""
if value not in range(1, 10):
raise exceptions.RoasterValueError
self._fan_speed.value = value | Verifies the value is between 1 and 9 inclusively. | entailment |
def heat_setting(self, value):
"""Verifies that the heat setting is between 0 and 3."""
if value not in range(0, 4):
raise exceptions.RoasterValueError
self._heat_setting.value = value | Verifies that the heat setting is between 0 and 3. | entailment |
def heater_level(self, value):
"""Verifies that the heater_level is between 0 and heater_segments.
Can only be called when freshroastsr700 object is initialized
with ext_sw_heater_drive=True. Will throw RoasterValueError
otherwise."""
if self._ext_sw_heater_drive:
if value not in range(0, self._heater_bangbang_segments+1):
raise exceptions.RoasterValueError
self._heater_level.value = value
else:
raise exceptions.RoasterValueError | Verifies that the heater_level is between 0 and heater_segments.
Can only be called when freshroastsr700 object is initialized
with ext_sw_heater_drive=True. Will throw RoasterValueError
otherwise. | entailment |
def set_state_transition_func(self, func):
"""THIS FUNCTION MUST BE CALLED BEFORE CALLING
freshroastsr700.auto_connect().
Set, or re-set, the state transition function callback.
The supplied function will be called from a separate thread within
freshroastsr700, triggered by a separate, internal child process.
This function will fail if the freshroastsr700 device is already
connected to hardware, because by that time, the timer process
and thread have already been spawned.
Args:
state_transition_func (func): the function to call for every
state transition. A state transition occurs whenever the
freshroastsr700's time_remaining value counts down to 0.
Returns:
nothing
"""
if self._connected.value:
logging.error("freshroastsr700.set_state_transition_func must be "
"called before freshroastsr700.auto_connect()."
" Not registering func.")
return False
# no connection yet. so OK to set func pointer
self._create_state_transition_system(func)
return True | THIS FUNCTION MUST BE CALLED BEFORE CALLING
freshroastsr700.auto_connect().
Set, or re-set, the state transition function callback.
The supplied function will be called from a separate thread within
freshroastsr700, triggered by a separate, internal child process.
This function will fail if the freshroastsr700 device is already
connected to hardware, because by that time, the timer process
and thread have already been spawned.
Args:
state_transition_func (func): the function to call for every
state transition. A state transition occurs whenever the
freshroastsr700's time_remaining value counts down to 0.
Returns:
nothing | entailment |
def update_data_run(self, event_to_wait_on):
"""This is the thread that listens to an event from
the comm process to execute the update_data_func callback
in the context of the main process.
"""
        # with the daemon=True setting, this thread should
# quit 'automatically'
while event_to_wait_on.wait():
event_to_wait_on.clear()
if self.update_data_callback_kill_event.is_set():
return
self.update_data_func() | This is the thread that listens to an event from
the comm process to execute the update_data_func callback
in the context of the main process. | entailment |
def state_transition_run(self, event_to_wait_on):
"""This is the thread that listens to an event from
the timer process to execute the state_transition_func callback
in the context of the main process.
"""
        # with the daemon=True setting, this thread should
# quit 'automatically'
while event_to_wait_on.wait():
event_to_wait_on.clear()
if self.state_transition_callback_kill_event.is_set():
return
self.state_transition_func() | This is the thread that listens to an event from
the timer process to execute the state_transition_func callback
in the context of the main process. | entailment |
def _connect(self):
"""Do not call this directly - call auto_connect() or connect(),
which will call _connect() for you.
Connects to the roaster and creates communication thread.
        Raises a RoasterLookupError exception if the hardware is not found.
"""
        # the following call raises a RoasterLookupError when the device
        # is not found.
port = utils.find_device('1A86:5523')
# on some systems, after the device port is added to the device list,
# it can take up to 20 seconds after USB insertion for
# the port to become available... (!)
# let's put a safety timeout in here as a precaution
wait_timeout = time.time() + 40.0 # should be PLENTY of time!
# let's update the _connect_state while we're at it...
self._connect_state.value = self.CS_CONNECTING
connect_success = False
while time.time() < wait_timeout:
try:
self._ser = serial.Serial(
port=port,
baudrate=9600,
bytesize=8,
parity='N',
stopbits=1.5,
timeout=0.25,
xonxoff=False,
rtscts=False,
dsrdtr=False)
connect_success = True
break
except serial.SerialException:
time.sleep(0.5)
if not connect_success:
# timeout on attempts
raise exceptions.RoasterLookupError
self._initialize() | Do not call this directly - call auto_connect() or connect(),
which will call _connect() for you.
Connects to the roaster and creates communication thread.
        Raises a RoasterLookupError exception if the hardware is not found.
def _initialize(self):
"""Sends the initialization packet to the roaster."""
self._header.value = b'\xAA\x55'
self._current_state.value = b'\x00\x00'
s = self._generate_packet()
self._ser.write(s)
self._header.value = b'\xAA\xAA'
self._current_state.value = b'\x02\x01'
return self._read_existing_recipe() | Sends the initialization packet to the roaster. | entailment |
def connect(self):
"""Attempt to connect to hardware immediately. Will not retry.
Check freshroastsr700.connected or freshroastsr700.connect_state
to verify result.
Raises:
            freshroastsr700.exceptions.RoasterLookupError
No hardware connected to the computer.
"""
self._start_connect(self.CA_SINGLE_SHOT)
while(self._connect_state.value == self.CS_ATTEMPTING_CONNECT or
self._connect_state.value == self.CS_CONNECTING):
time.sleep(0.1)
if self.CS_CONNECTED != self._connect_state.value:
raise exceptions.RoasterLookupError | Attempt to connect to hardware immediately. Will not retry.
Check freshroastsr700.connected or freshroastsr700.connect_state
to verify result.
Raises:
            freshroastsr700.exceptions.RoasterLookupError
No hardware connected to the computer. | entailment |
def _start_connect(self, connect_type):
"""Starts the connection process, as called (internally)
from the user context, either from auto_connect() or connect().
Never call this from the _comm() process context.
"""
if self._connect_state.value != self.CS_NOT_CONNECTED:
# already done or in process, assume success
return
self._connected.value = 0
self._connect_state.value = self.CS_ATTEMPTING_CONNECT
# tell comm process to attempt connection
self._attempting_connect.value = connect_type
# EXTREMELY IMPORTANT - for this to work at all in Windows,
# where the above processes are spawned (vs forked in Unix),
        # the thread objects (as attributes of this object) must be
# assigned to this object AFTER we have spawned the processes.
# That way, multiprocessing can pickle the freshroastsr700
# successfully. (It can't pickle thread-related stuff.)
if self.update_data_func is not None:
# Need to launch the thread that will listen to the event
self._create_update_data_system(
None, setFunc=False, createThread=True)
self.update_data_thread.start()
if self.state_transition_func is not None:
# Need to launch the thread that will listen to the event
self._create_state_transition_system(
None, setFunc=False, createThread=True)
self.state_transition_thread.start() | Starts the connection process, as called (internally)
from the user context, either from auto_connect() or connect().
Never call this from the _comm() process context. | entailment |
def _auto_connect(self):
"""Attempts to connect to the roaster every quarter of a second."""
while not self._teardown.value:
try:
self._connect()
return True
except exceptions.RoasterLookupError:
time.sleep(.25)
return False | Attempts to connect to the roaster every quarter of a second. | entailment |
def _comm(self, thermostat=False,
kp=0.06, ki=0.0075, kd=0.01,
heater_segments=8, ext_sw_heater_drive=False,
update_data_event=None):
"""Do not call this directly - call auto_connect(), which will spawn
comm() for you.
This is the main communications loop to the roaster.
        Whenever a valid packet is received from the device, if an
update_data_event is available, it will be signalled.
Args:
thermostat (bool): thermostat mode.
if set to True, turns on thermostat mode. In thermostat
mode, freshroastsr700 takes control of heat_setting and does
software PID control to hit the demanded target_temp.
ext_sw_heater_drive (bool): enable direct control over the internal
heat_controller object. Defaults to False. When set to True, the
thermostat field is IGNORED, and assumed to be False. Direct
control over the software heater_level means that the
PID controller cannot control the heater. Since thermostat and
ext_sw_heater_drive cannot be allowed to both be True, this arg
is given precedence over the thermostat arg.
kp (float): Kp value to use for PID control. Defaults to 0.06.
ki (float): Ki value to use for PID control. Defaults to 0.0075.
kd (float): Kd value to use for PID control. Defaults to 0.01.
heater_segments (int): the pseudo-control range for the internal
heat_controller object. Defaults to 8.
update_data_event (multiprocessing.Event): If set, allows the
comm_process to signal to the parent process that new device data
is available.
Returns:
nothing
"""
# since this process is started with daemon=True, it should exit
# when the owning process terminates. Therefore, safe to loop forever.
while not self._teardown.value:
# waiting for command to attempt connect
# print( "waiting for command to attempt connect")
while self._attempting_connect.value == self.CA_NONE:
time.sleep(0.25)
if self._teardown.value:
break
# if we're tearing down, bail now.
if self._teardown.value:
break
# we got the command to attempt to connect
# change state to 'attempting_connect'
self._connect_state.value = self.CS_ATTEMPTING_CONNECT
# attempt connection
if self.CA_AUTO == self._attempting_connect.value:
# this call will block until a connection is achieved
# it will also set _connect_state to CS_CONNECTING
# if appropriate
if self._auto_connect():
# when we unblock, it is an indication of a successful
# connection
self._connected.value = 1
self._connect_state.value = self.CS_CONNECTED
else:
# failure, normally due to a timeout
self._connected.value = 0
self._connect_state.value = self.CS_NOT_CONNECTED
# we failed to connect - start over from the top
# reset flag
self._attempting_connect.value = self.CA_NONE
continue
elif self.CA_SINGLE_SHOT == self._attempting_connect.value:
                # try once, now; if failure, start the big loop over
try:
self._connect()
self._connected.value = 1
self._connect_state.value = self.CS_CONNECTED
except exceptions.RoasterLookupError:
self._connected.value = 0
self._connect_state.value = self.CS_NOT_CONNECTED
if self._connect_state.value != self.CS_CONNECTED:
# we failed to connect - start over from the top
# reset flag
self._attempting_connect.value = self.CA_NONE
continue
else:
# shouldn't be here
# reset flag
self._attempting_connect.value = self.CA_NONE
continue
# We are connected!
# print( "We are connected!")
# reset flag right away
self._attempting_connect.value = self.CA_NONE
# Initialize PID controller if thermostat function was specified at
# init time
pidc = None
heater = None
if(thermostat):
pidc = pid.PID(kp, ki, kd,
Output_max=heater_segments,
Output_min=0
)
if thermostat or ext_sw_heater_drive:
heater = heat_controller(number_of_segments=heater_segments)
read_state = self.LOOKING_FOR_HEADER_1
r = []
write_errors = 0
read_errors = 0
while not self._disconnect.value:
start = datetime.datetime.now()
# write to device
if not self._write_to_device():
logging.error('comm - _write_to_device() failed!')
write_errors += 1
if write_errors > 3:
# it's time to consider the device as being "gone"
logging.error('comm - 3 successive write '
'failures, disconnecting.')
self._disconnect.value = 1
continue
else:
# reset write_errors
write_errors = 0
# read from device
try:
while self._ser.in_waiting:
_byte = self._ser.read(1)
read_state, r, err = (
self._process_reponse_byte(
read_state, _byte, r, update_data_event))
except IOError:
# typically happens when device is suddenly unplugged
logging.error('comm - read from device failed!')
read_errors += 1
                    if read_errors > 3:
# it's time to consider the device as being "gone"
logging.error('comm - 3 successive read '
'failures, disconnecting.')
self._disconnect.value = 1
continue
else:
read_errors = 0
# next, drive SW heater when using
# thermostat mode (PID controller calcs)
# or in external sw heater drive mode,
# when roasting.
if thermostat or ext_sw_heater_drive:
if 'roasting' == self.get_roaster_state():
if heater.about_to_rollover():
# it's time to use the PID controller value
# and set new output level on heater!
if ext_sw_heater_drive:
# read user-supplied value
heater.heat_level = self._heater_level.value
else:
# thermostat
output = pidc.update(
self.current_temp, self.target_temp)
heater.heat_level = output
# make this number visible to other processes...
self._heater_level.value = heater.heat_level
# read bang-bang heater output array element & apply it
if heater.generate_bangbang_output():
# ON
self.heat_setting = 3
else:
# OFF
self.heat_setting = 0
else:
# for all other states, heat_level = OFF
heater.heat_level = 0
# make this number visible to other processes...
self._heater_level.value = heater.heat_level
self.heat_setting = 0
# calculate sleep time to stick to 0.25sec period
comp_time = datetime.datetime.now() - start
sleep_duration = 0.25 - comp_time.total_seconds()
if sleep_duration > 0:
time.sleep(sleep_duration)
self._ser.close()
# reset disconnect flag
self._disconnect.value = 0
# reset connection values
self._connected.value = 0
self._connect_state.value = self.CS_NOT_CONNECTED | Do not call this directly - call auto_connect(), which will spawn
comm() for you.
This is the main communications loop to the roaster.
        Whenever a valid packet is received from the device, if an
update_data_event is available, it will be signalled.
Args:
thermostat (bool): thermostat mode.
if set to True, turns on thermostat mode. In thermostat
mode, freshroastsr700 takes control of heat_setting and does
software PID control to hit the demanded target_temp.
ext_sw_heater_drive (bool): enable direct control over the internal
heat_controller object. Defaults to False. When set to True, the
thermostat field is IGNORED, and assumed to be False. Direct
control over the software heater_level means that the
PID controller cannot control the heater. Since thermostat and
ext_sw_heater_drive cannot be allowed to both be True, this arg
is given precedence over the thermostat arg.
kp (float): Kp value to use for PID control. Defaults to 0.06.
ki (float): Ki value to use for PID control. Defaults to 0.0075.
kd (float): Kd value to use for PID control. Defaults to 0.01.
heater_segments (int): the pseudo-control range for the internal
heat_controller object. Defaults to 8.
update_data_event (multiprocessing.Event): If set, allows the
comm_process to signal to the parent process that new device data
is available.
Returns:
nothing | entailment |
def _timer(self, state_transition_event=None):
"""Timer loop used to keep track of the time while roasting or
cooling. If the time remaining reaches zero, the roaster will call the
        supplied state transition function or the roaster will be set to
the idle state."""
while not self._teardown.value:
state = self.get_roaster_state()
if(state == 'roasting' or state == 'cooling'):
time.sleep(1)
self.total_time += 1
if(self.time_remaining > 0):
self.time_remaining -= 1
else:
if(state_transition_event is not None):
state_transition_event.set()
else:
self.idle()
else:
time.sleep(0.01) | Timer loop used to keep track of the time while roasting or
cooling. If the time remaining reaches zero, the roaster will call the
        supplied state transition function or the roaster will be set to
the idle state. | entailment |
def get_roaster_state(self):
"""Returns a string based upon the current state of the roaster. Will
raise an exception if the state is unknown.
Returns:
'idle' if idle,
'sleeping' if sleeping,
'cooling' if cooling,
'roasting' if roasting,
'connecting' if in hardware connection phase,
'unknown' otherwise
"""
value = self._current_state.value
if(value == b'\x02\x01'):
return 'idle'
elif(value == b'\x04\x04'):
return 'cooling'
elif(value == b'\x08\x01'):
return 'sleeping'
# handle null bytes as empty strings
elif(value == b'\x00\x00' or value == b''):
return 'connecting'
elif(value == b'\x04\x02'):
return 'roasting'
else:
return 'unknown' | Returns a string based upon the current state of the roaster. Will
raise an exception if the state is unknown.
Returns:
'idle' if idle,
'sleeping' if sleeping,
'cooling' if cooling,
'roasting' if roasting,
'connecting' if in hardware connection phase,
'unknown' otherwise | entailment |
def _generate_packet(self):
"""Generates a packet based upon the current class variables. Note that
current temperature is not sent, as the original application sent zeros
to the roaster for the current temperature."""
roaster_time = utils.seconds_to_float(self._time_remaining.value)
packet = (
self._header.value +
self._temp_unit.value +
self._flags.value +
self._current_state.value +
struct.pack(">B", self._fan_speed.value) +
struct.pack(">B", int(round(roaster_time * 10.0))) +
struct.pack(">B", self._heat_setting.value) +
b'\x00\x00' +
self._footer)
return packet | Generates a packet based upon the current class variables. Note that
current temperature is not sent, as the original application sent zeros
to the roaster for the current temperature. | entailment |
def heat_level(self, value):
"""Set the desired output level. Must be between 0 and
number_of_segments inclusive."""
if value < 0:
self._heat_level = 0
elif round(value) > self._num_segments:
self._heat_level = self._num_segments
else:
self._heat_level = int(round(value)) | Set the desired output level. Must be between 0 and
number_of_segments inclusive. | entailment |
def generate_bangbang_output(self):
"""Generates the latest on or off pulse in
the string of on (True) or off (False) pulses
according to the desired heat_level setting. Successive calls
to this function will return the next value in the
on/off array series. Call this at control loop rate to
obtain the necessary on/off pulse train.
This system will not work if the caller expects to be able
to specify a new heat_level at every control loop iteration.
Only the value set at every number_of_segments iterations
will be picked up for output! Call about_to_rollover to determine
if it's time to set a new heat_level, if a new level is desired."""
if self._current_index >= self._num_segments:
# we're due to switch over to the next
# commanded heat_level
self._heat_level_now = self._heat_level
# reset array index
self._current_index = 0
# return output
out = self._output_array[self._heat_level_now][self._current_index]
self._current_index += 1
return out | Generates the latest on or off pulse in
the string of on (True) or off (False) pulses
according to the desired heat_level setting. Successive calls
to this function will return the next value in the
on/off array series. Call this at control loop rate to
obtain the necessary on/off pulse train.
This system will not work if the caller expects to be able
to specify a new heat_level at every control loop iteration.
Only the value set at every number_of_segments iterations
will be picked up for output! Call about_to_rollover to determine
if it's time to set a new heat_level, if a new level is desired. | entailment |
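A compact sketch of the calling protocol described above (illustrative only; the same pattern is used by freshroastsr700._comm() later in this file):
heater = heat_controller(number_of_segments=8)   # constructor as used in _comm()
demanded_level = 3                               # e.g. a PID output in the range 0..8
for _ in range(32):                              # one call per control-loop tick
    if heater.about_to_rollover():
        heater.heat_level = demanded_level       # only picked up at window boundaries
    heater_on = heater.generate_bangbang_output()
    # _comm() maps heater_on to heat_setting 3 (ON) or 0 (OFF) on the roaster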
def update_data(self):
"""This is a method that will be called every time a packet is opened
from the roaster."""
time_elapsed = datetime.datetime.now() - self.start_time
crntTemp = self.roaster.current_temp
targetTemp = self.roaster.target_temp
heaterLevel = self.roaster.heater_level
# print(
# "Time: %4.6f, crntTemp: %d, targetTemp: %d, heaterLevel: %d" %
# (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel))
self.file.write(
"%4.6f,%d,%d,%d\n" %
(time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel)) | This is a method that will be called every time a packet is opened
from the roaster. | entailment |
def next_state(self):
"""This is a method that will be called when the time remaining ends.
The current state can be: roasting, cooling, idle, sleeping, connecting,
        or unknown."""
self.active_recipe_item += 1
if self.active_recipe_item >= len(self.recipe):
# we're done!
return
# show state step on screen
print("--------------------------------------------")
print("Setting next process step: %d" % self.active_recipe_item)
print("time:%d, target: %ddegF, fan: %d, state: %s" %
(self.recipe[self.active_recipe_item]['time_remaining'],
self.recipe[self.active_recipe_item]['target_temp'],
self.recipe[self.active_recipe_item]['fan_speed'],
self.recipe[self.active_recipe_item]['state']
))
print("--------------------------------------------")
# set values for next state
self.roaster.time_remaining = (
self.recipe[self.active_recipe_item]['time_remaining'])
self.roaster.target_temp = (
self.recipe[self.active_recipe_item]['target_temp'])
self.roaster.fan_speed = (
self.recipe[self.active_recipe_item]['fan_speed'])
# set state
if(self.recipe[self.active_recipe_item]['state'] == 'roasting'):
self.roaster.roast()
elif(self.recipe[self.active_recipe_item]['state'] == 'cooling'):
self.roaster.cool()
elif(self.recipe[self.active_recipe_item]['state'] == 'idle'):
self.roaster.idle()
        elif(self.recipe[self.active_recipe_item]['state'] == 'sleeping'):
self.roaster.sleep() | This is a method that will be called when the time remaining ends.
The current state can be: roasting, cooling, idle, sleeping, connecting,
        or unknown.
def process_results(self):
""" Process results by providers """
for result in self._results:
provider = result.provider
self.providers.append(provider)
if result.error:
self.failed_providers.append(provider)
continue
if not result.response:
continue
# set blacklisted to True if ip is detected with at least one dnsbl
self.blacklisted = True
provider_categories = provider.process_response(result.response)
assert provider_categories.issubset(DNSBL_CATEGORIES)
self.categories = self.categories.union(provider_categories)
self.detected_by[provider.host] = list(provider_categories) | Process results by providers | entailment |
async def dnsbl_request(self, addr, provider):
"""
Make lookup to dnsbl provider
Parameters:
* addr (string) - ip address to check
* provider (string) - dnsbl provider
Returns:
* DNSBLResponse object
Raises:
* ValueError
"""
response = None
error = None
try:
socket.inet_aton(addr)
except socket.error:
raise ValueError('wrong ip format')
ip_reversed = '.'.join(reversed(addr.split('.')))
dnsbl_query = "%s.%s" % (ip_reversed, provider.host)
try:
async with self._semaphore:
response = await self._resolver.query(dnsbl_query, 'A')
except aiodns.error.DNSError as exc:
if exc.args[0] != 4: # 4: domain name not found:
error = exc
return DNSBLResponse(addr=addr, provider=provider, response=response, error=error) | Make lookup to dnsbl provider
Parameters:
* addr (string) - ip address to check
* provider (string) - dnsbl provider
Returns:
* DNSBLResponse object
Raises:
* ValueError | entailment |
async def _check_ip(self, addr):
"""
Async check ip with dnsbl providers.
Parameters:
* addr - ip address to check
Returns:
* DNSBLResult object
"""
tasks = []
for provider in self.providers:
tasks.append(self.dnsbl_request(addr, provider))
results = await asyncio.gather(*tasks)
return DNSBLResult(addr=addr, results=results) | Async check ip with dnsbl providers.
Parameters:
* addr - ip address to check
Returns:
* DNSBLResult object | entailment |
def check_ips(self, addrs):
"""
sync check multiple ips
"""
tasks = []
for addr in addrs:
tasks.append(self._check_ip(addr))
return self._loop.run_until_complete(asyncio.gather(*tasks)) | sync check multiple ips | entailment |
def frange(start, stop, step, precision):
"""A generator that will generate a range of floats."""
value = start
while round(value, precision) < stop:
yield round(value, precision)
value += step | A generator that will generate a range of floats. | entailment |
def find_device(vidpid):
"""Finds a connected device with the given VID:PID. Returns the serial
port url."""
for port in list_ports.comports():
if re.search(vidpid, port[2], flags=re.IGNORECASE):
return port[0]
raise exceptions.RoasterLookupError | Finds a connected device with the given VID:PID. Returns the serial
port url. | entailment |
def update(self, currentTemp, targetTemp):
"""Calculate PID output value for given reference input and feedback."""
# in this implementation, ki includes the dt multiplier term,
# and kd includes the dt divisor term. This is typical practice in
# industry.
self.targetTemp = targetTemp
self.error = targetTemp - currentTemp
self.P_value = self.Kp * self.error
# it is common practice to compute derivative term against PV,
# instead of de/dt. This is because de/dt spikes
# when the set point changes.
# PV version with no dPV/dt filter - note 'previous'-'current',
# that's desired, how the math works out
self.D_value = self.Kd * (self.Derivator - currentTemp)
self.Derivator = currentTemp
self.Integrator = self.Integrator + self.error
if self.Integrator > self.Integrator_max:
self.Integrator = self.Integrator_max
elif self.Integrator < self.Integrator_min:
self.Integrator = self.Integrator_min
self.I_value = self.Integrator * self.Ki
output = self.P_value + self.I_value + self.D_value
if output > self.Output_max:
output = self.Output_max
if output < self.Output_min:
output = self.Output_min
return(output) | Calculate PID output value for given reference input and feedback. | entailment |
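A self-contained sketch of a thermostat loop built on update() (illustrative only): the constructor call mirrors the one made in freshroastsr700._comm(), and the plant is a toy model so the snippet runs standalone.
pidc = PID(0.06, 0.0075, 0.01, Output_max=8, Output_min=0)
current_temp, target_temp = 150.0, 320.0
for _ in range(200):                                  # 200 steps of the 0.25 s loop
    output = pidc.update(current_temp, target_temp)   # clamped to Output_min..Output_max
    current_temp += 1.5 * output - 0.05 * (current_temp - 150.0)   # toy plant response
print(round(current_temp, 1))                         # temperature has risen toward the target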
def setPoint(self, targetTemp):
"""Initilize the setpoint of PID."""
self.targetTemp = targetTemp
self.Integrator = 0
self.Derivator = 0 | Initilize the setpoint of PID. | entailment |
def next_state(self):
"""This is a method that will be called when the time remaining ends.
The current state can be: roasting, cooling, idle, sleeping, connecting,
        or unknown."""
if(self.roaster.get_roaster_state() == 'roasting'):
self.roaster.time_remaining = 20
self.roaster.cool()
elif(self.roaster.get_roaster_state() == 'cooling'):
self.roaster.idle() | This is a method that will be called when the time remaining ends.
The current state can be: roasting, cooling, idle, sleeping, connecting,
        or unknown.
def load_nouns(self, file):
"""
Load dict from file for random words.
:param str file: filename
"""
with open(os.path.join(main_dir, file + '.dat'), 'r') as f:
self.nouns = json.load(f) | Load dict from file for random words.
:param str file: filename | entailment |
def load_dmails(self, file):
"""
Load list from file for random mails
:param str file: filename
"""
with open(os.path.join(main_dir, file + '.dat'), 'r') as f:
self.dmails = frozenset(json.load(f)) | Load list from file for random mails
:param str file: filename | entailment |
def load_nicknames(self, file):
"""
Load dict from file for random nicknames.
:param str file: filename
"""
with open(os.path.join(main_dir, file + '.dat'), 'r') as f:
self.nicknames = json.load(f) | Load dict from file for random nicknames.
:param str file: filename | entailment |
def random_words(self, letter=None, count=1):
"""
Returns list of random words.
:param str letter: letter
        :param int count: how many words
:rtype: list
:returns: list of random words
:raises: ValueError
"""
self.check_count(count)
words = []
if letter is None:
all_words = list(
chain.from_iterable(self.nouns.values()))
try:
words = sample(all_words, count)
except ValueError:
len_sample = len(all_words)
                raise ValueError('Param "count" must be less than {0}. \
(There are only {1} words.)'.format(len_sample + 1, len_sample))
elif type(letter) is not str:
raise ValueError('Param "letter" must be string.')
elif letter not in self.available_letters:
raise ValueError(
'Param "letter" must be in {0}.'.format(
self.available_letters))
elif letter in self.available_letters:
try:
words = sample(self.nouns[letter], count)
except ValueError:
len_sample = len(self.nouns[letter])
raise ValueError('Param "count" must be less than {0}. \
(It is only {0} words for letter "{1}")'.format(len_sample + 1, letter))
return words | Returns list of random words.
:param str letter: letter
        :param int count: how many words
:rtype: list
:returns: list of random words
:raises: ValueError | entailment |
def random_nicks(self, letter=None, gender='u', count=1):
"""
Return list of random nicks.
:param str letter: letter
:param str gender: ``'f'`` for female, ``'m'`` for male and None for both
        :param int count: how many nicks
:rtype: list
:returns: list of random nicks
:raises: ValueError
"""
self.check_count(count)
nicks = []
if gender not in ('f', 'm', 'u'):
raise ValueError('Param "gender" must be in (f, m, u)')
if letter is None:
all_nicks = list(
chain.from_iterable(self.nicknames[gender].values()))
try:
nicks = sample(all_nicks, count)
except ValueError:
len_sample = len(all_nicks)
                raise ValueError('Param "count" must be less than {0}. \
(There are only {1} nicks.)'.format(len_sample + 1, len_sample))
elif type(letter) is not str:
raise ValueError('Param "letter" must be string.')
elif letter not in self.available_letters:
raise ValueError(
'Param "letter" must be in "{0}".'.format(
self.available_letters))
elif letter in self.available_letters:
try:
nicks = sample(self.nicknames[gender][letter], count)
except ValueError:
len_sample = len(self.nicknames[gender][letter])
raise ValueError('Param "count" must be less than {0}. \
(It is only {0} nicks for letter "{1}")'.format(len_sample + 1, letter))
return nicks | Return list of random nicks.
:param str letter: letter
:param str gender: ``'f'`` for female, ``'m'`` for male and None for both
        :param int count: how many nicks
:rtype: list
:returns: list of random nicks
:raises: ValueError | entailment |
def randomMails(self, count=1):
"""
Return random e-mails.
:rtype: list
:returns: list of random e-mails
"""
self.check_count(count)
random_nicks = self.rn.random_nicks(count=count)
random_domains = sample(self.dmails, count)
return [
nick.lower() + "@" + domain for nick, domain in zip(random_nicks,
random_domains)
] | Return random e-mails.
:rtype: list
:returns: list of random e-mails | entailment |
def get_sentences_list(self, sentences=1):
"""
Return sentences in list.
:param int sentences: how many sentences
:returns: list of strings with sentence
:rtype: list
"""
if sentences < 1:
raise ValueError('Param "sentences" must be greater than 0.')
sentences_list = []
while sentences:
num_rand_words = random.randint(self.MIN_WORDS, self.MAX_WORDS)
random_sentence = self.make_sentence(
random.sample(self.words, num_rand_words))
sentences_list.append(random_sentence)
sentences -= 1
return sentences_list | Return sentences in list.
:param int sentences: how many sentences
:returns: list of strings with sentence
:rtype: list | entailment |
def make_sentence(list_words):
"""
Return a sentence from list of words.
:param list list_words: list of words
:returns: sentence
:rtype: str
"""
lw_len = len(list_words)
if lw_len > 6:
list_words.insert(lw_len // 2 + random.choice(range(-2, 2)), ',')
sentence = ' '.join(list_words).replace(' ,', ',')
return sentence.capitalize() + '.' | Return a sentence from list of words.
:param list list_words: list of words
:returns: sentence
:rtype: str | entailment |
def _discover_cover_image(zf, opf_xmldoc, opf_filepath):
'''
Find the cover image path in the OPF file.
Returns a tuple: (image content in base64, file extension)
'''
content = None
filepath = None
extension = None
# Strategies to discover the cover-image path:
# e.g.: <meta name="cover" content="cover"/>
tag = find_tag(opf_xmldoc, 'meta', 'name', 'cover')
if tag and 'content' in tag.attributes.keys():
item_id = tag.attributes['content'].value
if item_id:
# e.g.: <item href="cover.jpg" id="cover" media-type="image/jpeg"/>
filepath, extension = find_img_tag(opf_xmldoc, 'item', 'id', item_id)
if not filepath:
filepath, extension = find_img_tag(opf_xmldoc, 'item', 'id', 'cover-image')
if not filepath:
filepath, extension = find_img_tag(opf_xmldoc, 'item', 'id', 'cover')
# If we have found the cover image path:
if filepath:
# The cover image path is relative to the OPF file
base_dir = os.path.dirname(opf_filepath)
# Also, normalize the path (ie opfpath/../cover.jpg -> cover.jpg)
coverpath = os.path.normpath(os.path.join(base_dir, filepath))
content = zf.read(coverpath)
content = base64.b64encode(content)
return content, extension | Find the cover image path in the OPF file.
Returns a tuple: (image content in base64, file extension) | entailment |
def _discover_toc(zf, opf_xmldoc, opf_filepath):
'''
Returns a list of objects: {title: str, src: str, level: int, index: int}
'''
toc = None
# ePub 3.x
tag = find_tag(opf_xmldoc, 'item', 'properties', 'nav')
if tag and 'href' in tag.attributes.keys():
filepath = unquote(tag.attributes['href'].value)
# The xhtml file path is relative to the OPF file
base_dir = os.path.dirname(opf_filepath)
# print('- Reading Nav file: {}/{}'.format(base_dir, filepath))
npath = os.path.normpath(os.path.join(base_dir, filepath))
nav_content = zf.read(npath)
toc_xmldoc = minidom.parseString(nav_content)
_toc = []
for n in toc_xmldoc.getElementsByTagName('a'):
if n.firstChild and ('href' in n.attributes.keys()):
href = unquote(n.attributes['href'].value)
# Discarding CFI links
if '.html' in href or '.xhtml' in href:
title = n.firstChild.nodeValue
# try the second node too (maybe the first child is an empty span)
if not title and n.firstChild.firstChild:
title = n.firstChild.firstChild.nodeValue
title = title.strip() if title else None
if title:
level = -1
parentNode = n.parentNode
                        avoid_infinite_loop = 0  # simple safety counter to avoid an infinite loop on malformed epub files
while parentNode and parentNode.nodeName != 'nav' and avoid_infinite_loop < 50:
if parentNode.nodeName == 'ol': # count the depth of the a link related to ol items
level += 1
parentNode = parentNode.parentNode
avoid_infinite_loop += 1
level = max(level, 0) # root level is 0, not -1
_toc.append({'title': title, 'src': href, 'level': level})
if _toc:
toc = _toc
if not toc:
# ePub 2.x
tag = find_tag(opf_xmldoc, 'item', 'id', 'ncx')
if not tag:
tag = find_tag(opf_xmldoc, 'item', 'id', 'ncxtoc')
if tag and 'href' in tag.attributes.keys():
filepath = unquote(tag.attributes['href'].value)
# The ncx file path is relative to the OPF file
base_dir = os.path.dirname(opf_filepath)
# print('- Reading NCX file: {}/{}'.format(base_dir, filepath))
npath = os.path.normpath(os.path.join(base_dir, filepath))
ncx_content = zf.read(npath)
toc_xmldoc = minidom.parseString(ncx_content)
def read_nav_point(nav_point_node, level = 0):
items = []
item = {'title': None, 'src': None, 'level': level}
children_points = []
for item_node in nav_point_node.childNodes:
if item_node.nodeName in ('navLabel', 'ncx:navLabel'):
try:
text = item_node.getElementsByTagName('text')[0].firstChild
except IndexError:
try:
text = item_node.getElementsByTagName('ncx:text')[0].firstChild
except IndexError:
text = None
item['title'] = text.nodeValue.strip() if text and text.nodeValue else None
elif item_node.nodeName in ('content', 'ncx:content'):
if item_node.hasAttribute('src'):
item['src'] = item_node.attributes['src'].value
elif item_node.nodeName in ('navPoint', 'ncx:navPoint'):
children_points.append(item_node)
if item['title']:
items.append(item)
for child_node in children_points:
subitems = read_nav_point(child_node, level=level + 1)
items.extend(subitems)
return items
def read_nav_map(toc_xmldoc, level=0):
items = []
try:
nav_map_node = toc_xmldoc.getElementsByTagName('navMap')[0]
except IndexError:
# Some ebooks use the ncx: namespace so try that too
try:
nav_map_node = toc_xmldoc.getElementsByTagName('ncx:navMap')[0]
except IndexError:
print('Failed reading TOC')
return items
for nav_point in nav_map_node.childNodes:
if nav_point.nodeName in ('navPoint', 'ncx:navPoint'):
subitems = read_nav_point(nav_point, level=level)
items.extend(subitems)
return items
toc = read_nav_map(toc_xmldoc)
# add indexes
if toc:
for i, t in enumerate(toc):
t['index'] = i
return toc | Returns a list of objects: {title: str, src: str, level: int, index: int} | entailment |
def get_epub_metadata(filepath, read_cover_image=True, read_toc=True):
'''
References: http://idpf.org/epub/201 and http://idpf.org/epub/301
1. Parse META-INF/container.xml file and find the .OPF file path.
2. In the .OPF file, find the metadata
'''
if not zipfile.is_zipfile(filepath):
raise EPubException('Unknown file')
# print('Reading ePub file: {}'.format(filepath))
zf = zipfile.ZipFile(filepath, 'r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)
container = zf.read('META-INF/container.xml')
container_xmldoc = minidom.parseString(container)
# e.g.: <rootfile full-path="content.opf" media-type="application/oebps-package+xml"/>
opf_filepath = container_xmldoc.getElementsByTagName('rootfile')[0].attributes['full-path'].value
opf = zf.read(os.path.normpath(opf_filepath))
opf_xmldoc = minidom.parseString(opf)
# This file is specific to the authors if it exists.
authors_html = None
try:
authors_html = minidom.parseString(zf.read('OEBPS/pr02.html'))
except KeyError:
# Most books store authors using epub tags, so no worries.
pass
# This file is specific to the publish date if it exists.
publish_date_html = None
try:
publish_date_html = minidom.parseString(zf.read('OEBPS/pr01.html'))
except KeyError:
        # Most books store the publication date using epub tags, so no worries.
pass
file_size_in_bytes = os.path.getsize(filepath)
data = odict({
'epub_version': _discover_epub_version(opf_xmldoc),
'title': _discover_title(opf_xmldoc),
'language': _discover_language(opf_xmldoc),
'description': _discover_description(opf_xmldoc),
'authors': _discover_authors(opf_xmldoc, authors_html=authors_html),
'publisher': _discover_publisher(opf_xmldoc),
'publication_date': _discover_publication_date(opf_xmldoc,
date_html=publish_date_html),
'identifiers': _discover_identifiers(opf_xmldoc),
'subject': _discover_subject(opf_xmldoc),
'file_size_in_bytes': file_size_in_bytes,
})
if read_cover_image:
cover_image_content, cover_image_extension = _discover_cover_image(zf, opf_xmldoc, opf_filepath)
data.cover_image_content = cover_image_content
data.cover_image_extension = cover_image_extension
if read_toc:
data.toc = _discover_toc(zf, opf_xmldoc, opf_filepath)
return data | References: http://idpf.org/epub/201 and http://idpf.org/epub/301
1. Parse META-INF/container.xml file and find the .OPF file path.
2. In the .OPF file, find the metadata | entailment |
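A short usage sketch (illustrative only; 'book.epub' is a placeholder path):
data = get_epub_metadata('book.epub', read_cover_image=False, read_toc=True)
print(data.title, data.authors, data.publication_date)
for entry in (data.toc or []):                 # each entry has title/src/level/index
    print('  ' * entry['level'] + entry['title'])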
def get_epub_opf_xml(filepath):
'''
Returns the file.OPF contents of the ePub file
'''
if not zipfile.is_zipfile(filepath):
raise EPubException('Unknown file')
# print('Reading ePub file: {}'.format(filepath))
zf = zipfile.ZipFile(filepath, 'r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)
container = zf.read('META-INF/container.xml')
container_xmldoc = minidom.parseString(container)
# e.g.: <rootfile full-path="content.opf" media-type="application/oebps-package+xml"/>
opf_filepath = container_xmldoc.getElementsByTagName('rootfile')[0].attributes['full-path'].value
return zf.read(opf_filepath) | Returns the file.OPF contents of the ePub file | entailment |
def firstname(self, value, case, gender=None):
u"""
        Declension of a first name
        :param value: Value to decline
        :param case: Case to decline to (a value from the Case class)
        :param gender: Grammatical gender
"""
if not value:
raise ValueError('Firstname cannot be empty.')
return self.__inflect(value, case, 'firstname', gender) | u"""
        Declension of a first name
        :param value: Value to decline
        :param case: Case to decline to (a value from the Case class)
        :param gender: Grammatical gender
def lastname(self, value, case, gender=None):
u"""
        Declension of a last name
        :param value: Value to decline
        :param case: Case to decline to (a value from the Case class)
        :param gender: Grammatical gender
"""
if not value:
raise ValueError('Lastname cannot be empty.')
return self.__inflect(value, case, 'lastname', gender) | u"""
        Declension of a last name
        :param value: Value to decline
        :param case: Case to decline to (a value from the Case class)
        :param gender: Grammatical gender
def middlename(self, value, case, gender=None):
u"""
        Declension of a middle name (patronymic)
        :param value: Value to decline
        :param case: Case to decline to (a value from the Case class)
        :param gender: Grammatical gender
"""
if not value:
raise ValueError('Middlename cannot be empty.')
return self.__inflect(value, case, 'middlename', gender) | u"""
        Declension of a middle name (patronymic)
        :param value: Value to decline
        :param case: Case to decline to (a value from the Case class)
        :param gender: Grammatical gender
def __split_name(self, name):
u"""
        Splits a name into segments using the delimiters in self.separators
        :param name: the name
        :return: the split name segments, including the delimiters
"""
def gen(name, separators):
if len(separators) == 0:
yield name
else:
segments = name.split(separators[0])
for subsegment in gen(segments[0], separators[1:]):
yield subsegment
for segment in segments[1:]:
for subsegment in gen(segment, separators[1:]):
yield separators[0]
yield subsegment
return gen(name, self.separators) | u"""
        Splits a name into segments using the delimiters in self.separators
        :param name: the name
        :return: the split name segments, including the delimiters
def expand(expression):
"""
Expand a reference expression to individual spans.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> expand('a1')
'a1'
>>> expand('a1[3:5]')
'a1[3:5]'
>>> expand('a1[3:5+6:7]')
'a1[3:5]+a1[6:7]'
>>> expand('a1 a2 a3')
'a1 a2 a3'
"""
tokens = []
for (pre, _id, _range) in robust_ref_re.findall(expression):
if not _range:
tokens.append('{}{}'.format(pre, _id))
else:
tokens.append(pre)
tokens.extend(
'{}{}[{}:{}]'.format(delim, _id, start, end)
for delim, start, end in span_re.findall(_range)
)
return ''.join(tokens) | Expand a reference expression to individual spans.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> expand('a1')
'a1'
>>> expand('a1[3:5]')
'a1[3:5]'
>>> expand('a1[3:5+6:7]')
'a1[3:5]+a1[6:7]'
>>> expand('a1 a2 a3')
'a1 a2 a3' | entailment |
def compress(expression):
"""
Compress a reference expression to group spans on the same id.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> compress('a1')
'a1'
>>> compress('a1[3:5]')
'a1[3:5]'
>>> compress('a1[3:5+6:7]')
'a1[3:5+6:7]'
>>> compress('a1[3:5]+a1[6:7]')
'a1[3:5+6:7]'
>>> compress('a1 a2 a3')
'a1 a2 a3'
"""
tokens = []
selection = []
last_id = None
for (pre, _id, _range) in robust_ref_re.findall(expression):
if _range and _id == last_id:
selection.extend([pre, _range])
continue
if selection:
tokens.extend(selection + [']'])
selection = []
tokens.extend([pre, _id])
if _range:
selection = ['[', _range]
last_id = _id
else:
last_id = None
if selection:
tokens.extend(selection + [']'])
return ''.join(tokens) | Compress a reference expression to group spans on the same id.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> compress('a1')
'a1'
>>> compress('a1[3:5]')
'a1[3:5]'
>>> compress('a1[3:5+6:7]')
'a1[3:5+6:7]'
>>> compress('a1[3:5]+a1[6:7]')
'a1[3:5+6:7]'
>>> compress('a1 a2 a3')
'a1 a2 a3' | entailment |
def selections(expression, keep_delimiters=True):
"""
Split the expression into individual selection expressions. The
delimiters will be kept as separate items if keep_delimters=True.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> selections('a1')
['a1']
>>> selections('a1[3:5]')
['a1[3:5]']
>>> selections('a1[3:5+6:7]')
['a1[3:5+6:7]']
>>> selections('a1[3:5+6:7]+a2[1:4]')
['a1[3:5+6:7]', '+', 'a2[1:4]']
>>> selections('a1[3:5+6:7]+a2[1:4]', keep_delimiters=False)
['a1[3:5+6:7]', 'a2[1:4]']
>>> selections('a1 a2 a3')
['a1', ' ', 'a2', ' ', 'a3']
"""
tokens = []
for (pre, _id, _range) in robust_ref_re.findall(expression):
if keep_delimiters and pre:
tokens.append(pre)
if _id:
if _range:
tokens.append('{}[{}]'.format(_id, _range))
else:
tokens.append(_id)
return tokens | Split the expression into individual selection expressions. The
delimiters will be kept as separate items if keep_delimters=True.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> selections('a1')
['a1']
>>> selections('a1[3:5]')
['a1[3:5]']
>>> selections('a1[3:5+6:7]')
['a1[3:5+6:7]']
>>> selections('a1[3:5+6:7]+a2[1:4]')
['a1[3:5+6:7]', '+', 'a2[1:4]']
>>> selections('a1[3:5+6:7]+a2[1:4]', keep_delimiters=False)
['a1[3:5+6:7]', 'a2[1:4]']
>>> selections('a1 a2 a3')
['a1', ' ', 'a2', ' ', 'a3'] | entailment |
def resolve(container, expression):
"""
Return the string that is the resolution of the alignment expression
`expression`, which selects ids from `container`.
"""
itemgetter = getattr(container, 'get_item', container.get)
tokens = []
expression = expression.strip()
for sel_delim, _id, _range in selection_re.findall(expression):
tokens.append(delimiters.get(sel_delim, ''))
item = itemgetter(_id)
if item is None:
raise XigtStructureError(
'Referred Item (id: {}) from reference "{}" does not '
'exist in the given container.'
.format(_id, expression)
)
# treat None values as empty strings for resolution
value = item.value() or ''
if _range:
for spn_delim, start, end in span_re.findall(_range):
start = int(start) if start else None
end = int(end) if end else None
tokens.extend([
delimiters.get(spn_delim, ''),
value[start:end]
])
else:
tokens.append(value)
return ''.join(tokens) | Return the string that is the resolution of the alignment expression
`expression`, which selects ids from `container`. | entailment |
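A minimal sketch of resolve() exercised with a plain dict of stub items (illustrative only; a real xigt container provides get_item(), and the exact join characters come from the module's delimiters mapping):
class StubItem(object):
    def __init__(self, text):
        self._text = text
    def value(self):
        return self._text

words = {'w1': StubItem('dogs'), 'w2': StubItem('bark')}   # dict.get serves as the item getter
print(resolve(words, 'w1[0:3]+w2'))    # takes 'dog' from w1 and joins it with 'bark'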
def referents(igt, id, refattrs=None):
"""
Return a list of ids denoting objects (tiers or items) in `igt` that
are referred by the object denoted by `id` using a reference
attribute in `refattrs`. If `refattrs` is None, then consider all
known reference attributes for the type of object denoted by _id_.
In other words, if 'b1' refers to 'a1' using 'alignment', then
`referents(igt, 'b1', ['alignment'])` returns `['a1']`.
"""
obj = igt.get_any(id)
if obj is None:
raise XigtLookupError(id)
if refattrs is None:
refattrs = obj.allowed_reference_attributes()
return {ra: ids(obj.attributes.get(ra, '')) for ra in refattrs} | Return a list of ids denoting objects (tiers or items) in `igt` that
are referred by the object denoted by `id` using a reference
attribute in `refattrs`. If `refattrs` is None, then consider all
known reference attributes for the type of object denoted by _id_.
In other words, if 'b1' refers to 'a1' using 'alignment', then
`referents(igt, 'b1', ['alignment'])` returns `['a1']`. | entailment |
def referrers(igt, id, refattrs=None):
"""
Return a list of ids denoting objects (tiers or items) in `igt` that
refer to the given `id`. In other words, if 'b1' refers to 'a1',
then `referrers(igt, 'a1')` returns `['b1']`.
"""
if refattrs is None:
result = {}
else:
result = {ra: [] for ra in refattrs}
# if the id is a tier, only look at tiers; otherwise only look at items
try:
obj = igt[id]
others = igt.tiers
except KeyError:
obj = igt.get_item(id)
others = [i for t in igt.tiers for i in t.items]
if obj is None:
raise XigtLookupError(id)
for other in others:
if other.id is None:
continue # raise a warning?
_refattrs = refattrs
if _refattrs is None:
_refattrs = other.allowed_reference_attributes()
attrget = other.attributes.get # just loop optimization
for ra in _refattrs:
result.setdefault(ra, [])
if id in ids(attrget(ra, '')):
result[ra].append(other.id)
return result | Return a list of ids denoting objects (tiers or items) in `igt` that
refer to the given `id`. In other words, if 'b1' refers to 'a1',
then `referrers(igt, 'a1')` returns `['b1']`. | entailment |
def ancestors(obj, refattrs=(ALIGNMENT, SEGMENTATION)):
"""
>>> for anc in query.ancestors(igt.get_item('g1'), refattrs=(ALIGNMENT, SEGMENTATION)):
... print(anc)
(<Tier object (id: g type: glosses) at ...>, 'alignment', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: p type: phrases) at ...>, [<Item object (id: p1) at ...>])
"""
if hasattr(obj, 'tier'):
tier = obj.tier
items = [obj]
else:
tier = obj
items = tier.items
# a tier may be visited twice (e.g. A > B > A), but then it stops;
# this is to avoid cycles
visited = set([tier.id])
while True:
# get the first specified attribute
refattr = next((ra for ra in refattrs if ra in tier.attributes), None)
if not refattr:
break
reftier = ref.dereference(tier, refattr)
ids = set(chain.from_iterable(
ref.ids(item.attributes.get(refattr, '')) for item in items
))
refitems = [item for item in reftier.items if item.id in ids]
yield (tier, refattr, reftier, refitems)
# cycle detection; break if we've now encountered something twice
if reftier.id in visited:
break
visited.update(reftier.id)
tier = reftier
items = refitems | >>> for anc in query.ancestors(igt.get_item('g1'), refattrs=(ALIGNMENT, SEGMENTATION)):
... print(anc)
(<Tier object (id: g type: glosses) at ...>, 'alignment', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: p type: phrases) at ...>, [<Item object (id: p1) at ...>]) | entailment |
def descendants(obj, refattrs=(SEGMENTATION, ALIGNMENT), follow='first'):
"""
>>> for des in query.descendants(igt.get_item('p1'), refattrs=(SEGMENTATION, ALIGNMENT)):
... print(des)
(<Tier object (id: p type: phrases) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: p type: phrases) at ...>, 'alignment', <Tier object (id: t type: translations) at ...>, [<Item object (id: t1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'alignment', <Tier object (id: g type: glosses) at ...>, [<Item object (id: g1) at ...>])
"""
if hasattr(obj, 'tier'):
tier = obj.tier
items = [obj]
else:
tier = obj
items = tier.items
igt = tier.igt
visited = set()
agenda = deque([(tier, items)])
while agenda:
tier, items = agenda.popleft()
tier_refs = tier.referrers(refattrs)
item_ids = set(item.id for item in items)
# get followable refattrs with something on the referrers list
ras = [ra for ra in refattrs if tier_refs[ra]]
if follow == 'first' and ras:
ras = [ras[0]]
if not ras:
continue
# unlike ancestors, descendants for a refattr may have 1+ tiers
for refattr in ras:
# try to avoid cycles
if (tier.id, refattr) in visited:
continue
else:
visited.add((tier.id, refattr))
for reftier_id in tier_refs[refattr]:
reftier = igt[reftier_id]
refitems = [
item for item in reftier.items
if set(ref.ids(item.attributes.get(refattr,'')))
.intersection(item_ids)
]
yield (tier, refattr, reftier, refitems)
agenda.append((reftier, refitems)) | >>> for des in query.descendants(igt.get_item('p1'), refattrs=(SEGMENTATION, ALIGNMENT)):
... print(des)
(<Tier object (id: p type: phrases) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: p type: phrases) at ...>, 'alignment', <Tier object (id: t type: translations) at ...>, [<Item object (id: t1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'alignment', <Tier object (id: g type: glosses) at ...>, [<Item object (id: g1) at ...>]) | entailment |
def default_decode(events, mode='full'):
"""Decode a XigtCorpus element."""
event, elem = next(events)
root = elem # store root for later instantiation
while (event, elem.tag) not in [('start', 'igt'), ('end', 'xigt-corpus')]:
event, elem = next(events)
igts = None
if event == 'start' and elem.tag == 'igt':
igts = (
decode_igt(e)
for e in iter_elements(
'igt', events, root, break_on=[('end', 'xigt-corpus')]
)
)
xc = decode_xigtcorpus(root, igts=igts, mode=mode)
return xc | Decode a XigtCorpus element. | entailment |
def _get_file_content(source):
"""Return a tuple, each value being a line of the source file.
Remove empty lines and comments (lines starting with a '#').
"""
filepath = os.path.join('siglists', source + '.txt')
lines = []
with resource_stream(__name__, filepath) as f:
for i, line in enumerate(f):
line = line.decode('utf-8', 'strict').strip()
if not line or line.startswith('#'):
continue
try:
re.compile(line)
except Exception as ex:
raise BadRegularExpressionLineError(
'Regex error: {} in file {} at line {}'.format(
str(ex),
filepath,
i
)
)
lines.append(line)
if source in _SPECIAL_EXTENDED_VALUES:
lines = lines + _SPECIAL_EXTENDED_VALUES[source]
return tuple(lines) | Return a tuple, each value being a line of the source file.
Remove empty lines and comments (lines starting with a '#'). | entailment |
def _get_rar_version(xfile):
"""Check quickly whether file is rar archive.
"""
buf = xfile.read(len(RAR5_ID))
if buf.startswith(RAR_ID):
return 3
elif buf.startswith(RAR5_ID):
xfile.read(1)
return 5
return 0 | Check quickly whether file is rar archive. | entailment |
def _open_next(self):
"""Proceed to next volume."""
# is the file split over archives?
if (self._cur.flags & rarfile.RAR_FILE_SPLIT_AFTER) == 0:
return False
if self._fd:
self._fd.close()
self._fd = None
# open next part
self._volfile = self._parser._next_volname(self._volfile)
fd = rarfile.XFile(self._volfile)
self._fd = fd
sig = fd.read(len(self._parser._expect_sig))
if sig != self._parser._expect_sig:
raise rarfile.BadRarFile("Invalid signature")
# loop until first file header
while 1:
cur = self._parser._parse_header(fd)
if not cur:
raise rarfile.BadRarFile("Unexpected EOF")
if cur.type in (rarfile.RAR_BLOCK_MARK, rarfile.RAR_BLOCK_MAIN):
if cur.add_size:
fd.seek(cur.add_size, 1)
continue
if cur.orig_filename != self._inf.orig_filename:
raise rarfile.BadRarFile("Did not found file entry")
self._cur = cur
self._cur_avail = cur.add_size
return True | Proceed to next volume. | entailment |
def _check(self): # TODO: fix?
"""Do not check final CRC."""
if self._returncode:
rarfile.check_returncode(self, '')
if self._remain != 0:
raise rarfile.BadRarFile("Failed the read enough data") | Do not check final CRC. | entailment |