body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 class) | body_without_docstring (string, 20-98.2k chars)
---|---|---|---|---|---|---|---
@victims.setter
def victims(self, victims):
'Sets the victims of this CredentialSetSchemaData.\n\n List of purported victims. # noqa: E501\n\n :param victims: The victims of this CredentialSetSchemaData. # noqa: E501\n :type victims: list[CredentialSetSchemaDataVictims]\n '
self._victims = victims | -6,781,148,290,898,929,000 | Sets the victims of this CredentialSetSchemaData.
List of purported victims. # noqa: E501
:param victims: The victims of this CredentialSetSchemaData. # noqa: E501
:type victims: list[CredentialSetSchemaDataVictims] | titan_client/models/credential_set_schema_data.py | victims | intel471/titan-client-python | python | @victims.setter
def victims(self, victims):
'Sets the victims of this CredentialSetSchemaData.\n\n List of purported victims. # noqa: E501\n\n :param victims: The victims of this CredentialSetSchemaData. # noqa: E501\n :type victims: list[CredentialSetSchemaDataVictims]\n '
self._victims = victims |
def to_dict(self, serialize=False):
'Returns the model properties as a dict'
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result | -1,664,115,404,714,547,500 | Returns the model properties as a dict | titan_client/models/credential_set_schema_data.py | to_dict | intel471/titan-client-python | python | def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | titan_client/models/credential_set_schema_data.py | to_str | intel471/titan-client-python | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | titan_client/models/credential_set_schema_data.py | __repr__ | intel471/titan-client-python | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, CredentialSetSchemaData)):
return False
return (self.to_dict() == other.to_dict()) | -3,107,021,995,298,215,400 | Returns true if both objects are equal | titan_client/models/credential_set_schema_data.py | __eq__ | intel471/titan-client-python | python | def __eq__(self, other):
if (not isinstance(other, CredentialSetSchemaData)):
return False
return (self.to_dict() == other.to_dict()) |
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, CredentialSetSchemaData)):
return True
return (self.to_dict() != other.to_dict()) | -4,546,790,678,469,889,000 | Returns true if both objects are not equal | titan_client/models/credential_set_schema_data.py | __ne__ | intel471/titan-client-python | python | def __ne__(self, other):
if (not isinstance(other, CredentialSetSchemaData)):
return True
return (self.to_dict() != other.to_dict()) |
def solve1(buses, est):
'Get the earliest bus from the <buses> according to the <est>imate\n time. '
arrival = [(bus - (est % bus)) for bus in buses]
earliest = min(arrival)
return (min(arrival) * buses[arrival.index(earliest)]) | -3,282,658,681,182,208,000 | Get the earliest bus from the <buses> according to the <est>imate
time. | src/day13.py | solve1 | mfrdbigolin/AoC2020 | python | def solve1(buses, est):
'Get the earliest bus from the <buses> according to the <est>imate\n time. '
arrival = [(bus - (est % bus)) for bus in buses]
earliest = min(arrival)
return (min(arrival) * buses[arrival.index(earliest)]) |
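As a quick check of the arithmetic above, the sketch below runs the same wait-time computation on the well-known AoC 2020 day 13 sample schedule (estimate 939, buses 7, 13, 59, 31, 19); those sample values are an assumption taken from the public puzzle, not from this repository's input files.

```python
# Hedged worked example of solve1's arithmetic (sample values assumed from the
# public AoC 2020 day 13 puzzle, not from this repository's input files).
est = 939
buses = [7, 13, 59, 31, 19]
arrival = [bus - est % bus for bus in buses]      # minutes until each bus departs
earliest = min(arrival)
print(arrival)                                    # [6, 10, 5, 22, 11]
print(earliest * buses[arrival.index(earliest)])  # 5 * 59 == 295
```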
def solve2(buses, depart):
'Find the smallest timestamp, such that all the <buses> follow their\n bus ID, which is indexically paired with <depart>.\n\n Here I used the Chinese Remainder Theorem, someone well acquainted to\n anyone who does competitive or discrete mathematics. '
mods = [((b - d) % b) for (b, d) in zip(buses, depart)]
cross_mul = [(product(buses) // b) for b in buses]
return (sum([((c * pow(c, (- 1), b)) * m) for (b, c, m) in zip(buses, cross_mul, mods)]) % product(buses)) | -6,611,917,787,805,794,000 | Find the smallest timestamp, such that all the <buses> follow their
bus ID, which is indexically paired with <depart>.
Here I used the Chinese Remainder Theorem, something well known to
anyone who does competitive or discrete mathematics. | src/day13.py | solve2 | mfrdbigolin/AoC2020 | python | def solve2(buses, depart):
'Find the smallest timestamp, such that all the <buses> follow their\n bus ID, which is indexically paired with <depart>.\n\n Here I used the Chinese Remainder Theorem, someone well acquainted to\n anyone who does competitive or discrete mathematics. '
mods = [((b - d) % b) for (b, d) in zip(buses, depart)]
cross_mul = [(product(buses) // b) for b in buses]
return (sum([((c * pow(c, (- 1), b)) * m) for (b, c, m) in zip(buses, cross_mul, mods)]) % product(buses)) |
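To make the Chinese Remainder Theorem construction concrete, here is a minimal standalone sketch of the same computation; it assumes `product` behaves like `math.prod` and reuses the widely known AoC 2020 day 13 sample (`7,13,x,x,59,x,31,19`), which is not part of this file.

```python
# Hedged sketch of solve2's CRT construction, assuming math.prod in place of
# the module's product() helper; the sample schedule is the public AoC example.
from math import prod

buses = [7, 13, 59, 31, 19]   # bus IDs, i.e. the (pairwise coprime) moduli
depart = [0, 1, 4, 6, 7]      # required offsets (positions in the schedule)

N = prod(buses)
mods = [(b - d) % b for b, d in zip(buses, depart)]
t = sum((N // b) * pow(N // b, -1, b) * m for b, m in zip(buses, mods)) % N
print(t)   # 1068781 -- each bus b departs at t + its offset
```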
@numba.jit(nopython=True, nogil=True, parallel=True)
def _numba_add(xx, yy, nn, cinf, x, y, z, w, typ, zw, varw, ww):
'\n numba jit compiled add function\n\n - Numba compiles this function, ensure that no classes/functions are within unless they are also numba-ized\n - Numba.prange forces numba to parallelize, generates exception when parallelism fails, helping you figure out\n what needs to be fixed. Otherwise parallel=True can fail silently\n - nopython=True, this function operates entirely outside of the python interpreter\n - nogil=True, will not use the python GIL (this might be redundant with nopython)\n\n '
grows = yy.size
gcols = xx.size
doindices = 0
cinf2 = (cinf ** 2)
for idx in numba.prange(grows):
ddy = ((y - yy[idx]) ** 2)
yidx = np.flatnonzero((ddy < cinf2))
if (yidx.size == 0):
continue
xtest = (cinf2 - ddy[yidx])
for jdx in numba.prange(gcols):
xidx = np.flatnonzero((((x[yidx] - xx[jdx]) ** 2) < xtest))
if (xidx.size == 0):
continue
II = yidx[xidx]
if (typ == 'dwm'):
if (w.size != 1):
R = ((((xx[jdx] - x[II]) ** 2) + ((yy[idx] - y[II]) ** 2)) ** (w[II] / 2.0))
else:
R = ((((xx[jdx] - x[II]) ** 2) + ((yy[idx] - y[II]) ** 2)) ** (w / 2.0))
if (not doindices):
nn[(idx, jdx)] = np.nansum(np.array([nn[(idx, jdx)], xidx.size]))
else:
nn[(idx, jdx)] = ((idx * (gcols - 1)) + jdx)
if (w.size != 1):
chunk_w = w[II]
else:
chunk_w = w
if (typ == 'mean'):
(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)]) = _numba_mean_by_cell(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)], nn[(idx, jdx)], z[II], chunk_w)
elif (typ == 'median'):
(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)]) = _numba_median_by_cell(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)], z[II])
return (zw, ww, varw, nn) | -2,726,346,112,734,129,700 | numba jit compiled add function
- Numba compiles this function; ensure that no classes/functions are within unless they are also numba-ized
- Numba.prange forces numba to parallelize and raises an exception when parallelism fails, helping you figure out
what needs to be fixed. Otherwise parallel=True can fail silently
- nopython=True, this function operates entirely outside of the python interpreter
- nogil=True, will not use the python GIL (this might be redundant with nopython) | VGRID/vgrid.py | _numba_add | valschmidt/vgrid | python | @numba.jit(nopython=True, nogil=True, parallel=True)
def _numba_add(xx, yy, nn, cinf, x, y, z, w, typ, zw, varw, ww):
'\n numba jit compiled add function\n\n - Numba compiles this function, ensure that no classes/functions are within unless they are also numba-ized\n - Numba.prange forces numba to parallelize, generates exception when parallelism fails, helping you figure out\n what needs to be fixed. Otherwise parallel=True can fail silently\n - nopython=True, this function operates entirely outside of the python interpreter\n - nogil=True, will not use the python GIL (this might be redundant with nopython)\n\n '
grows = yy.size
gcols = xx.size
doindices = 0
cinf2 = (cinf ** 2)
for idx in numba.prange(grows):
ddy = ((y - yy[idx]) ** 2)
yidx = np.flatnonzero((ddy < cinf2))
if (yidx.size == 0):
continue
xtest = (cinf2 - ddy[yidx])
for jdx in numba.prange(gcols):
xidx = np.flatnonzero((((x[yidx] - xx[jdx]) ** 2) < xtest))
if (xidx.size == 0):
continue
II = yidx[xidx]
if (typ == 'dwm'):
if (w.size != 1):
R = ((((xx[jdx] - x[II]) ** 2) + ((yy[idx] - y[II]) ** 2)) ** (w[II] / 2.0))
else:
R = ((((xx[jdx] - x[II]) ** 2) + ((yy[idx] - y[II]) ** 2)) ** (w / 2.0))
if (not doindices):
nn[(idx, jdx)] = np.nansum(np.array([nn[(idx, jdx)], xidx.size]))
else:
nn[(idx, jdx)] = ((idx * (gcols - 1)) + jdx)
if (w.size != 1):
chunk_w = w[II]
else:
chunk_w = w
if (typ == 'mean'):
(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)]) = _numba_mean_by_cell(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)], nn[(idx, jdx)], z[II], chunk_w)
elif (typ == 'median'):
(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)]) = _numba_median_by_cell(zw[(idx, jdx)], ww[(idx, jdx)], varw[(idx, jdx)], z[II])
return (zw, ww, varw, nn) |
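The numba flags described in the docstring are easier to see in isolation. The toy function below (not part of vgrid) shows the same nopython/nogil/parallel pattern with `numba.prange` on the outer loop.

```python
# Minimal, self-contained example of the numba pattern described above
# (nopython + nogil + parallel, with prange on the outer loop).
import numba
import numpy as np

@numba.jit(nopython=True, nogil=True, parallel=True)
def row_sums(a):
    out = np.zeros(a.shape[0])
    for i in numba.prange(a.shape[0]):   # parallelized outer loop
        out[i] = a[i, :].sum()
    return out

print(row_sums(np.ones((4, 3))))         # [3. 3. 3. 3.]
```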
@numba.jit(nopython=True)
def _numba_median_by_cell(zw_cell, ww_cell, varw_cell, z):
' Calculate the median value in each grid cell.\n\n The method used here to provide a "running median" is for each add(),\n calculate the average of the existing value with the median of the\n new points. This method works reasonably well, but can produce\n inferior results if a single add() contains only outliers and their\n are insufficient additional adds to constrain it.'
zw = np.nanmean(np.append(zw_cell, np.nanmedian(z)))
ww = 1
varw = np.nansum(np.append((z - (zw_cell / (ww_cell ** 2))), varw_cell))
return (zw, ww, varw) | 1,534,134,619,217,558,300 | Calculate the median value in each grid cell.
The method used here to provide a "running median" is for each add(),
calculate the average of the existing value with the median of the
new points. This method works reasonably well, but can produce
inferior results if a single add() contains only outliers and there
are insufficient additional adds to constrain it. | VGRID/vgrid.py | _numba_median_by_cell | valschmidt/vgrid | python | @numba.jit(nopython=True)
def _numba_median_by_cell(zw_cell, ww_cell, varw_cell, z):
' Calculate the median value in each grid cell.\n\n The method used here to provide a "running median" is for each add(),\n calculate the average of the existing value with the median of the\n new points. This method works reasonably well, but can produce\n inferior results if a single add() contains only outliers and their\n are insufficient additional adds to constrain it.'
zw = np.nanmean(np.append(zw_cell, np.nanmedian(z)))
ww = 1
varw = np.nansum(np.append((z - (zw_cell / (ww_cell ** 2))), varw_cell))
return (zw, ww, varw) |
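A tiny sketch of the running-median approximation described above illustrates the caveat: averaging the stored value with the median of each new add() can drift badly when one add() holds only outliers. The numbers below are illustrative only.

```python
# Minimal sketch of the running-median caveat: each add() averages the stored
# value with the median of the new points, so an outlier-only add() dominates.
import numpy as np

stored = np.nan
for new_points in [np.array([1.0, 2.0, 3.0]), np.array([100.0])]:
    stored = np.nanmean([stored, np.nanmedian(new_points)])
print(stored)   # 51.0, far from the true median (2.5) of all points seen
```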
def zz(self):
' Calculate the z values for the grid.'
return (self.zw / self.ww) | -3,181,243,170,877,537,000 | Calculate the z values for the grid. | VGRID/vgrid.py | zz | valschmidt/vgrid | python | def zz(self):
' '
return (self.zw / self.ww) |
def mean_wholegrid(self):
' Calculate mean values for the whole grid.'
[self.mean(idx, jdx) for idx in range(self.yy.size) for jdx in range(self.xx.size) if (self._I[idx][jdx] is not None)] | 8,913,522,569,416,889,000 | Calculate mean values for the whole grid. | VGRID/vgrid.py | mean_wholegrid | valschmidt/vgrid | python | def mean_wholegrid(self):
' '
[self.mean(idx, jdx) for idx in range(self.yy.size) for jdx in range(self.xx.size) if (self._I[idx][jdx] is not None)] |
def median_wholegrid(self):
' Calculate median values for the whole grid.'
[self.median(idx, jdx) for idx in range(self.yy.size) for jdx in range(self.xx.size) if (self._I[idx][jdx] is not None)] | -3,475,235,760,687,923,700 | Calculate median values for the whole grid. | VGRID/vgrid.py | median_wholegrid | valschmidt/vgrid | python | def median_wholegrid(self):
' '
[self.median(idx, jdx) for idx in range(self.yy.size) for jdx in range(self.xx.size) if (self._I[idx][jdx] is not None)] |
def mean(self, idx, jdx):
'Mean gridding algorithm.\n\n vgrid implemnets incremental gridding where possible.\n To do this, the sum of the product of the weights and z values are\n retained in addition to the sum of the weights. Then method zz()\n calculates the quotient of the two to obtain the actual weighted\n mean z values. Note that when all weights are one, (or if w is set to\n 1 for shorthand), a standard mean is calculated.\n\n Variance is calcualted in a similar way. In this case the sum of\n w*(z_i - mu)^2 is calculated and stored for each grid node, where\n z_i is the value to be gridded and mu is the mean of the grid node\n calculated thus far. Then this sum is divided by the sum of the\n weights to get the final estimated variance. As the mean of the grid\n node approaches the true mean, this value should approach the true\n variance.\n '
self._II = self._I[idx][jdx]
if (self._w.size == 1):
self.zw[(idx, jdx)] = np.nansum(np.concatenate((self._z[self._II], [self.zw[(idx, jdx)]])))
self.ww[(idx, jdx)] = self.nn[(idx, jdx)]
self.varw[(idx, jdx)] = np.nansum(np.concatenate((np.power((self._z[self._II] - (self.zw[(idx, jdx)] / self.nn[(idx, jdx)])), 2), [self.varw[(idx, jdx)]])))
else:
self.zw[(idx, jdx)] = np.nansum(np.append(self.zw[(idx, jdx)], (self._z[self._II] * self._w[self._II])))
self.ww[(idx, jdx)] = np.nansum(np.append(self.ww[(idx, jdx)], self._w[self._II]))
self.varw[(idx, jdx)] = np.nansum(np.append(np.power((self._z[self._II] - (self.zw[(idx, jdx)] / self.ww[(idx, jdx)])), 2), self.varw[(idx, jdx)])) | -1,551,458,373,005,666,000 | Mean gridding algorithm.
vgrid implements incremental gridding where possible.
To do this, the sum of the products of the weights and z values is
retained in addition to the sum of the weights. Then method zz()
calculates the quotient of the two to obtain the actual weighted
mean z values. Note that when all weights are one (or if w is set to
1 for shorthand), a standard mean is calculated.
Variance is calculated in a similar way. In this case the sum of
w*(z_i - mu)^2 is calculated and stored for each grid node, where
z_i is the value to be gridded and mu is the mean of the grid node
calculated thus far. Then this sum is divided by the sum of the
weights to get the final estimated variance. As the mean of the grid
node approaches the true mean, this value should approach the true
variance. | VGRID/vgrid.py | mean | valschmidt/vgrid | python | def mean(self, idx, jdx):
'Mean gridding algorithm.\n\n vgrid implemnets incremental gridding where possible.\n To do this, the sum of the product of the weights and z values are\n retained in addition to the sum of the weights. Then method zz()\n calculates the quotient of the two to obtain the actual weighted\n mean z values. Note that when all weights are one, (or if w is set to\n 1 for shorthand), a standard mean is calculated.\n\n Variance is calcualted in a similar way. In this case the sum of\n w*(z_i - mu)^2 is calculated and stored for each grid node, where\n z_i is the value to be gridded and mu is the mean of the grid node\n calculated thus far. Then this sum is divided by the sum of the\n weights to get the final estimated variance. As the mean of the grid\n node approaches the true mean, this value should approach the true\n variance.\n '
self._II = self._I[idx][jdx]
if (self._w.size == 1):
self.zw[(idx, jdx)] = np.nansum(np.concatenate((self._z[self._II], [self.zw[(idx, jdx)]])))
self.ww[(idx, jdx)] = self.nn[(idx, jdx)]
self.varw[(idx, jdx)] = np.nansum(np.concatenate((np.power((self._z[self._II] - (self.zw[(idx, jdx)] / self.nn[(idx, jdx)])), 2), [self.varw[(idx, jdx)]])))
else:
self.zw[(idx, jdx)] = np.nansum(np.append(self.zw[(idx, jdx)], (self._z[self._II] * self._w[self._II])))
self.ww[(idx, jdx)] = np.nansum(np.append(self.ww[(idx, jdx)], self._w[self._II]))
self.varw[(idx, jdx)] = np.nansum(np.append(np.power((self._z[self._II] - (self.zw[(idx, jdx)] / self.ww[(idx, jdx)])), 2), self.varw[(idx, jdx)])) |
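The incremental bookkeeping the docstring describes reduces to two running sums per cell. The standalone sketch below (not the class itself) shows how zw and ww accumulate across add() passes and how zz() recovers the weighted mean.

```python
# Standalone sketch of the incremental weighted mean: only the running sums
# are stored, and zw / ww reproduces the overall weighted mean at any point.
import numpy as np

zw, ww = 0.0, 0.0
chunks = [(np.array([1.0, 3.0]), np.array([1.0, 1.0])),   # first add()
          (np.array([5.0]),      np.array([2.0]))]        # second add()
for z, w in chunks:
    zw += np.nansum(z * w)   # sum of weighted values
    ww += np.nansum(w)       # sum of weights
print(zw / ww)               # 3.5 == (1*1 + 3*1 + 5*2) / (1 + 1 + 2)
```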
def var(self):
' Calculate the variance'
return (self.varw / self.ww) | 8,553,262,426,190,962,000 | Calculate the variance | VGRID/vgrid.py | var | valschmidt/vgrid | python | def var(self):
' '
return (self.varw / self.ww) |
def std(self):
'Calculate the standard deviation'
return np.sqrt(self.var()) | -7,358,242,616,525,090,000 | Calculate the standard deviation | VGRID/vgrid.py | std | valschmidt/vgrid | python | def std(self):
return np.sqrt(self.var()) |
def meanwithoutlierrejection(self):
' TO DO: Calculate the mean, rejecting values that exceed 3-sigma\n from existing estimate.'
pass | 4,126,677,113,143,395,000 | TO DO: Calculate the mean, rejecting values that exceed 3-sigma
from existing estimate. | VGRID/vgrid.py | meanwithoutlierrejection | valschmidt/vgrid | python | def meanwithoutlierrejection(self):
' TO DO: Calculate the mean, rejecting values that exceed 3-sigma\n from existing estimate.'
pass |
def median(self, idx, jdx):
' Calculate the median value in each grid cell.\n \n The method used here to provide a "running median" is for each add(),\n calculate the average of the existing value with the median of the\n new points. This method works reasonably well, but can produce\n inferior results if a single add() contains only outliers and their\n are insufficient additional adds to constrain it.'
self.zw[(idx, jdx)] = np.nanmean(np.append(self.zw[(idx, jdx)], np.nanmedian(self._z[self._II])))
self.ww[(idx, jdx)] = 1
self.varw[(idx, jdx)] = np.nansum(np.append(np.power((self._z[self._II] - (self.zw[(idx, jdx)] / self.ww[(idx, jdx)])), 2), self.varw[(idx, jdx)]))
pass | -8,187,741,193,710,137,000 | Calculate the median value in each grid cell.
The method used here to provide a "running median" is for each add(),
calculate the average of the existing value with the median of the
new points. This method works reasonably well, but can produce
inferior results if a single add() contains only outliers and there
are insufficient additional adds to constrain it. | VGRID/vgrid.py | median | valschmidt/vgrid | python | def median(self, idx, jdx):
' Calculate the median value in each grid cell.\n \n The method used here to provide a "running median" is for each add(),\n calculate the average of the existing value with the median of the\n new points. This method works reasonably well, but can produce\n inferior results if a single add() contains only outliers and their\n are insufficient additional adds to constrain it.'
self.zw[(idx, jdx)] = np.nanmean(np.append(self.zw[(idx, jdx)], np.nanmedian(self._z[self._II])))
self.ww[(idx, jdx)] = 1
self.varw[(idx, jdx)] = np.nansum(np.append(np.power((self._z[self._II] - (self.zw[(idx, jdx)] / self.ww[(idx, jdx)])), 2), self.varw[(idx, jdx)]))
pass |
def gridsizesanitycheck(self, M):
'Check to see if the grid size is going to be REALLY large. '
if (M.__len__() > 10000.0):
return False
else:
return True | -5,416,826,466,113,330,000 | Check to see if the grid size is going to be REALLY large. | VGRID/vgrid.py | gridsizesanitycheck | valschmidt/vgrid | python | def gridsizesanitycheck(self, M):
' '
if (M.__len__() > 10000.0):
return False
else:
return True |
def create_new_grid(self):
' Create a new empty grid.'
self.xx = np.arange(min(self._x), (max(self._x) + self.cs), self.cs)
self.yy = np.arange(min(self._y), (max(self._y) + self.cs), self.cs)
if (not (self.gridsizesanitycheck(self.xx) and self.gridsizesanitycheck(self.yy))):
print('Grid size is too large.')
return
self.zw = np.empty((self.yy.size, self.xx.size))
self.zw.fill(np.nan)
self.nn = np.copy(self.zw)
self.ww = np.copy(self.zw)
self.varw = np.copy(self.zw) | -1,567,089,638,302,527,200 | Create a new empty grid. | VGRID/vgrid.py | create_new_grid | valschmidt/vgrid | python | def create_new_grid(self):
' '
self.xx = np.arange(min(self._x), (max(self._x) + self.cs), self.cs)
self.yy = np.arange(min(self._y), (max(self._y) + self.cs), self.cs)
if (not (self.gridsizesanitycheck(self.xx) and self.gridsizesanitycheck(self.yy))):
print('Grid size is too large.')
return
self.zw = np.empty((self.yy.size, self.xx.size))
self.zw.fill(np.nan)
self.nn = np.copy(self.zw)
self.ww = np.copy(self.zw)
self.varw = np.copy(self.zw) |
def add(self, x, y, z, w):
" An incremental gridding function\n\n Arguments:\n x: x-coordinates\n y: y-coordiantes\n z: z-scalar values to grid\n w: w-weight applied to each point (size of x or 1 for no weighting)\n When 'type' = Nlowerthan or Ngreaterthan, w is the threshold value\n When 'type' = distance weighted mean, distance = R^w\n cs: grid cell size\n cinf: cell influence\n type: type of grid (see below)\n\n Output:\n g.xx: vector of grid cell x coordinates.\n g.yy: vector of grid cell y coordiantes.\n g.zz: 2D matrix of grided values times their weights.\n g.nn: 2D matrix containing the number of points in each grid cell.\n g.ww: sum of weights of items in the grid cell\n\n %\n % Grid types:\n % mean:\n % Average of the values. When w != 1, the mean is calculated by\n % multipying each value in the cell by its weight divided by the sum\n % of the weights in that cell.\n %\n % median:\n % Calculates the median value for each grid cell.\n %\n % mode:\n % Calculates the mode of the values for each grid cell.\n %\n % shoalest:\n % Calculates the minimum value for each grid cell.\n %\n % deepest:\n % Calculates the maximum value for each grid cell.\n %\n % stddev:\n % Calculates the standard deviation of the values in each grid cell.\n %\n % stderr:\n % Calculates the standard error of the values in each grid cell\n % (stddev/N, where stddev is the standard deviation and N is the number\n % of points falling in the cell)\n %\n % dwm:\n % Calculates the distance weighted mean where each value in the cell is\n % inversely weighted by the square if it's distance to the cell node.\n %\n % Nlowerthan:\n % Calculates the number of points in the grid cell lower than some value,\n % w.\n %\n % Ngreaterthan:\n % Calculates the number of points greater than some value w.\n %\n % To Do:\n % - Rewrite mean function as a matrix operation to simplify the propagation\n % of uncertainty calcualtion. Actually this might be make more general such\n % that your pass a list of values, their uncertainty and weighting factors\n % and get back a mean and propagated uncertainty. This would allow\n % relatively simple incorporation of things like range weighting, footprint\n % weighting, gaussian weighting, etc.\n % - Add uncertainty to z input and propagate these through the\n % calculations.\n % - Add uncertainty to x and y inputs and propagate these through the\n % calculations (more difficult)\n % Rewrite a C mex function.\n %\n % Val Schmidt\n % CCOM/JHC\n % 2018, 2019\n "
if (np.isscalar(x) or np.isscalar(y) or np.isscalar(z)):
print('X, Y, or Z is scalar - must be numpy array.')
sys.exit()
self._x = x.ravel()
self._y = y.ravel()
self._z = z.ravel()
if (not np.isscalar(w)):
self._w = w.ravel()
else:
self._w = np.array(w)
if (self._w.size != 1):
if sum((self._w == 0)):
print('Found zero weights. Weights cannot be zero.')
print('Setting to 1e-20.')
self._w[(self._w == 0)] = 1e-20
if (self.zw is None):
self.create_new_grid()
else:
self.expand_grid()
grows = self.yy.size
gcols = self.xx.size
doindices = 0
self.sort_data_kdtree()
if (self.type == 'dwm'):
print('Not yet supported.')
if (self.type == 'mean'):
self.mean_wholegrid()
if (self.type == 'median'):
self.median_wholegrid() | 6,376,264,592,654,362,000 | An incremental gridding function
Arguments:
x: x-coordinates
y: y-coordiantes
z: z-scalar values to grid
w: w-weight applied to each point (size of x or 1 for no weighting)
When 'type' = Nlowerthan or Ngreaterthan, w is the threshold value
When 'type' = distance weighted mean, distance = R^w
cs: grid cell size
cinf: cell influence
type: type of grid (see below)
Output:
g.xx: vector of grid cell x coordinates.
g.yy: vector of grid cell y coordiantes.
g.zz: 2D matrix of grided values times their weights.
g.nn: 2D matrix containing the number of points in each grid cell.
g.ww: sum of weights of items in the grid cell
%
% Grid types:
% mean:
% Average of the values. When w != 1, the mean is calculated by
% multiplying each value in the cell by its weight divided by the sum
% of the weights in that cell.
%
% median:
% Calculates the median value for each grid cell.
%
% mode:
% Calculates the mode of the values for each grid cell.
%
% shoalest:
% Calculates the minimum value for each grid cell.
%
% deepest:
% Calculates the maximum value for each grid cell.
%
% stddev:
% Calculates the standard deviation of the values in each grid cell.
%
% stderr:
% Calculates the standard error of the values in each grid cell
% (stddev/N, where stddev is the standard deviation and N is the number
% of points falling in the cell)
%
% dwm:
% Calculates the distance weighted mean where each value in the cell is
% inversely weighted by the square of its distance to the cell node.
%
% Nlowerthan:
% Calculates the number of points in the grid cell lower than some value,
% w.
%
% Ngreaterthan:
% Calculates the number of points greater than some value w.
%
% To Do:
% - Rewrite mean function as a matrix operation to simplify the propagation
% of uncertainty calculation. Actually this might be made more general such
% that you pass a list of values, their uncertainties and weighting factors
% and get back a mean and propagated uncertainty. This would allow
% relatively simple incorporation of things like range weighting, footprint
% weighting, gaussian weighting, etc.
% - Add uncertainty to z input and propagate these through the
% calculations.
% - Add uncertainty to x and y inputs and propagate these through the
% calculations (more difficult)
% Rewrite a C mex function.
%
% Val Schmidt
% CCOM/JHC
% 2018, 2019 | VGRID/vgrid.py | add | valschmidt/vgrid | python | def add(self, x, y, z, w):
" An incremental gridding function\n\n Arguments:\n x: x-coordinates\n y: y-coordiantes\n z: z-scalar values to grid\n w: w-weight applied to each point (size of x or 1 for no weighting)\n When 'type' = Nlowerthan or Ngreaterthan, w is the threshold value\n When 'type' = distance weighted mean, distance = R^w\n cs: grid cell size\n cinf: cell influence\n type: type of grid (see below)\n\n Output:\n g.xx: vector of grid cell x coordinates.\n g.yy: vector of grid cell y coordiantes.\n g.zz: 2D matrix of grided values times their weights.\n g.nn: 2D matrix containing the number of points in each grid cell.\n g.ww: sum of weights of items in the grid cell\n\n %\n % Grid types:\n % mean:\n % Average of the values. When w != 1, the mean is calculated by\n % multipying each value in the cell by its weight divided by the sum\n % of the weights in that cell.\n %\n % median:\n % Calculates the median value for each grid cell.\n %\n % mode:\n % Calculates the mode of the values for each grid cell.\n %\n % shoalest:\n % Calculates the minimum value for each grid cell.\n %\n % deepest:\n % Calculates the maximum value for each grid cell.\n %\n % stddev:\n % Calculates the standard deviation of the values in each grid cell.\n %\n % stderr:\n % Calculates the standard error of the values in each grid cell\n % (stddev/N, where stddev is the standard deviation and N is the number\n % of points falling in the cell)\n %\n % dwm:\n % Calculates the distance weighted mean where each value in the cell is\n % inversely weighted by the square if it's distance to the cell node.\n %\n % Nlowerthan:\n % Calculates the number of points in the grid cell lower than some value,\n % w.\n %\n % Ngreaterthan:\n % Calculates the number of points greater than some value w.\n %\n % To Do:\n % - Rewrite mean function as a matrix operation to simplify the propagation\n % of uncertainty calcualtion. Actually this might be make more general such\n % that your pass a list of values, their uncertainty and weighting factors\n % and get back a mean and propagated uncertainty. This would allow\n % relatively simple incorporation of things like range weighting, footprint\n % weighting, gaussian weighting, etc.\n % - Add uncertainty to z input and propagate these through the\n % calculations.\n % - Add uncertainty to x and y inputs and propagate these through the\n % calculations (more difficult)\n % Rewrite a C mex function.\n %\n % Val Schmidt\n % CCOM/JHC\n % 2018, 2019\n "
if (np.isscalar(x) or np.isscalar(y) or np.isscalar(z)):
print('X, Y, or Z is scalar - must be numpy array.')
sys.exit()
self._x = x.ravel()
self._y = y.ravel()
self._z = z.ravel()
if (not np.isscalar(w)):
self._w = w.ravel()
else:
self._w = np.array(w)
if (self._w.size != 1):
if sum((self._w == 0)):
print('Found zero weights. Weights cannot be zero.')
print('Setting to 1e-20.')
self._w[(self._w == 0)] = 1e-20
if (self.zw is None):
self.create_new_grid()
else:
self.expand_grid()
grows = self.yy.size
gcols = self.xx.size
doindices = 0
self.sort_data_kdtree()
if (self.type == 'dwm'):
print('Not yet supported.')
if (self.type == 'mean'):
self.mean_wholegrid()
if (self.type == 'median'):
self.median_wholegrid() |
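A usage sketch helps tie the arguments together. It mirrors the gridTest helper further down, so the constructor call vgrid(1, 1, 'mean') is copied from that helper rather than assumed; the import path is an assumption.

```python
# Usage sketch mirroring the gridTest helper further down (constructor call
# vgrid(1, 1, 'mean') is copied from there; the import path is an assumption).
import numpy as np
# from vgrid import vgrid   # assumed import path for VGRID/vgrid.py

N = 1000
x = np.random.random((N, 1)) * 100
y = np.random.random((N, 1)) * 100
z = np.exp(np.sqrt((x - 50.0) ** 2 + (y - 50.0) ** 2) / 50)

G = vgrid(1, 1, 'mean')   # cell size, cell influence, grid type
G.add(x, y, z, 1)         # w=1 -> plain (unweighted) mean
depths = G.zz()           # gridded values: zw / ww
```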
def sort_data_kdtree(self):
' A sorting of the data into grid cells using KDtrees.'
tree = spatial.cKDTree(list(zip(self._x.ravel(), self._y.ravel())), leafsize=10000000.0)
(xxx, yyy) = np.meshgrid(self.xx, self.yy)
indexes = tree.query_ball_point(np.vstack((xxx.ravel(), yyy.ravel())).T, r=self.cinf, p=2, n_jobs=(- 1)).reshape(xxx.shape)
self._I = indexes | -8,088,798,651,022,963,000 | A sorting of the data into grid cells using KDtrees. | VGRID/vgrid.py | sort_data_kdtree | valschmidt/vgrid | python | def sort_data_kdtree(self):
' '
tree = spatial.cKDTree(list(zip(self._x.ravel(), self._y.ravel())), leafsize=10000000.0)
(xxx, yyy) = np.meshgrid(self.xx, self.yy)
indexes = tree.query_ball_point(np.vstack((xxx.ravel(), yyy.ravel())).T, r=self.cinf, p=2, n_jobs=(- 1)).reshape(xxx.shape)
self._I = indexes |
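For reference, the cell-influence query above boils down to a single `cKDTree.query_ball_point` call per grid node. A toy example with made-up coordinates:

```python
# Toy example of the KD-tree cell-influence query: indices of all points
# within radius cinf of each grid node (coordinates here are made up).
import numpy as np
from scipy import spatial

x = np.array([0.1, 0.9, 5.0])
y = np.array([0.1, 0.2, 5.0])
tree = spatial.cKDTree(np.column_stack((x, y)))
nodes = np.array([[0.0, 0.0], [5.0, 5.0]])
print(tree.query_ball_point(nodes, r=1.0))   # [list([0, 1]) list([2])]
```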
def sort_data(self):
' Determine which data contributes to each grid node.\n The list of indices is populated in self._I[n][m], where n and m\n indicate the grid node.'
self._I = [x[:] for x in ([([None] * self.xx.size)] * self.yy.size)]
cinf2 = (self.cinf ** 2)
for idx in np.arange(0, self.yy.size, dtype='uint32'):
'\n We need to search through all the data efficiently to determine\n indices for points that will contribute to a grid node. Those that\n contribute are ones that fall within the "cell influence" (cinf).\n Thse are the ones that meet the criteria:\n\n sqrt( (x-xo).^2 + (y-yo).^2 ) < cinf\n\n Squaring both sides....\n\n (x-xo)^2 + (y-yo)^2 < cinf^2\n\n This will never be true when either term of the lhs is >= cinf^2.\n So we reduce the search by doing these piece-meal. '
ddy = ((self._y - self.yy[idx]) ** 2)
yidx = np.flatnonzero((ddy < cinf2))
if (yidx.size == 0):
continue
xtest = (cinf2 - ddy[yidx])
for jdx in np.arange(0, self.xx.size, dtype='uint32'):
xidx = np.flatnonzero((((self._x[yidx] - self.xx[jdx]) ** 2) < xtest))
if (xidx.size == 0):
continue
self._I[idx][jdx] = yidx[xidx]
self.nn[(idx, jdx)] = np.nansum(np.append(self.nn[(idx, jdx)], xidx.size)) | -7,180,185,893,760,760,000 | Determine which data contributes to each grid node.
The list of indices is populated in self._I[n][m], where n and m
indicate the grid node. | VGRID/vgrid.py | sort_data | valschmidt/vgrid | python | def sort_data(self):
' Determine which data contributes to each grid node.\n The list of indices is populated in self._I[n][m], where n and m\n indicate the grid node.'
self._I = [x[:] for x in ([([None] * self.xx.size)] * self.yy.size)]
cinf2 = (self.cinf ** 2)
for idx in np.arange(0, self.yy.size, dtype='uint32'):
'\n We need to search through all the data efficiently to determine\n indices for points that will contribute to a grid node. Those that\n contribute are ones that fall within the "cell influence" (cinf).\n Thse are the ones that meet the criteria:\n\n sqrt( (x-xo).^2 + (y-yo).^2 ) < cinf\n\n Squaring both sides....\n\n (x-xo)^2 + (y-yo)^2 < cinf^2\n\n This will never be true when either term of the lhs is >= cinf^2.\n So we reduce the search by doing these piece-meal. '
ddy = ((self._y - self.yy[idx]) ** 2)
yidx = np.flatnonzero((ddy < cinf2))
if (yidx.size == 0):
continue
xtest = (cinf2 - ddy[yidx])
for jdx in np.arange(0, self.xx.size, dtype='uint32'):
xidx = np.flatnonzero((((self._x[yidx] - self.xx[jdx]) ** 2) < xtest))
if (xidx.size == 0):
continue
self._I[idx][jdx] = yidx[xidx]
self.nn[(idx, jdx)] = np.nansum(np.append(self.nn[(idx, jdx)], xidx.size)) |
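The piecemeal radius test that sort_data's inner comment derives can be illustrated on its own: points can only fall within cinf of a node if each squared coordinate offset is already below cinf squared, so the y test prunes candidates before the x test. Values below are illustrative only.

```python
# Tiny illustration of the piecemeal radius test described in sort_data:
# the y test prunes candidates before the (tighter) x test is applied.
import numpy as np

x = np.array([0.2, 3.0, 0.4])
y = np.array([0.1, 0.1, 5.0])
xo, yo, cinf = 0.0, 0.0, 1.0

ddy = (y - yo) ** 2
yidx = np.flatnonzero(ddy < cinf ** 2)            # survives the y test: [0, 1]
xidx = np.flatnonzero((x[yidx] - xo) ** 2 < cinf ** 2 - ddy[yidx])
print(yidx[xidx])                                  # [0] -- only the first point
```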
def numba_add(self, x, y, z, w, chnksize=100000):
"\n An attempt at running self.add with numba. Key here is to chunk the points so that the numba compiled function\n _numba_add runs multiple times, where the first run is slow as it compiles. _numba_add is not within the class,\n as classes aren't supported. There is this new thing numba.jitclass, but it appears to still be experimental.\n\n On my test dataset containing about 4.5 million soundings, I got the following results:\n - existing add = 55.8 seconds\n - numba_add (chunksize, time) = (100, 55.2), (1000, 21.2), (10000, 17.9), (100000, 16.6), (150000, 16.2),\n (200000, 15.7), (1000000, 18.0)\n "
if (np.isscalar(x) or np.isscalar(y) or np.isscalar(z)):
print('X, Y, or Z is scalar - must be numpy array.')
sys.exit()
self._x = x.ravel()
self._y = y.ravel()
self._z = z.ravel()
if (not np.isscalar(w)):
self._w = w.ravel()
else:
self._w = np.array(w)
if (self._w.size != 1):
if sum((self._w == 0)):
print('Found zero weights. Weights cannot be zero.')
print('Setting to 1e-20.')
self._w[(self._w == 0)] = 1e-20
if (self.zw is None):
self.create_new_grid()
else:
self.expand_grid()
ptlen = len(self._x)
chnks = [[(i * chnksize), min(((i + 1) * chnksize), ptlen)] for i in range((int((ptlen / chnksize)) + 1))]
for chnk in chnks:
chnk_idx = slice(chnk[0], chnk[1])
if (self._w.size != 1):
chunk_w = self._w[chnk_idx]
else:
chunk_w = self._w
(self.zw, self.ww, self.varw, self.nn) = _numba_add(self.xx, self.yy, self.nn, self.cinf, self._x[chnk_idx], self._y[chnk_idx], self._z[chnk_idx], chunk_w, self.type, self.zw, self.varw, self.ww) | 7,784,261,836,886,096,000 | An attempt at running self.add with numba. Key here is to chunk the points so that the numba compiled function
_numba_add runs multiple times, where the first run is slow as it compiles. _numba_add is not within the class,
as classes aren't supported. There is this new thing numba.jitclass, but it appears to still be experimental.
On my test dataset containing about 4.5 million soundings, I got the following results:
- existing add = 55.8 seconds
- numba_add (chunksize, time) = (100, 55.2), (1000, 21.2), (10000, 17.9), (100000, 16.6), (150000, 16.2),
(200000, 15.7), (1000000, 18.0) | VGRID/vgrid.py | numba_add | valschmidt/vgrid | python | def numba_add(self, x, y, z, w, chnksize=100000):
"\n An attempt at running self.add with numba. Key here is to chunk the points so that the numba compiled function\n _numba_add runs multiple times, where the first run is slow as it compiles. _numba_add is not within the class,\n as classes aren't supported. There is this new thing numba.jitclass, but it appears to still be experimental.\n\n On my test dataset containing about 4.5 million soundings, I got the following results:\n - existing add = 55.8 seconds\n - numba_add (chunksize, time) = (100, 55.2), (1000, 21.2), (10000, 17.9), (100000, 16.6), (150000, 16.2),\n (200000, 15.7), (1000000, 18.0)\n "
if (np.isscalar(x) or np.isscalar(y) or np.isscalar(z)):
print('X, Y, or Z is scalar - must be numpy array.')
sys.exit()
self._x = x.ravel()
self._y = y.ravel()
self._z = z.ravel()
if (not np.isscalar(w)):
self._w = w.ravel()
else:
self._w = np.array(w)
if (self._w.size != 1):
if sum((self._w == 0)):
print('Found zero weights. Weights cannot be zero.')
print('Setting to 1e-20.')
self._w[(self._w == 0)] = 1e-20
if (self.zw is None):
self.create_new_grid()
else:
self.expand_grid()
ptlen = len(self._x)
chnks = [[(i * chnksize), min(((i + 1) * chnksize), ptlen)] for i in range((int((ptlen / chnksize)) + 1))]
for chnk in chnks:
chnk_idx = slice(chnk[0], chnk[1])
if (self._w.size != 1):
chunk_w = self._w[chnk_idx]
else:
chunk_w = self._w
(self.zw, self.ww, self.varw, self.nn) = _numba_add(self.xx, self.yy, self.nn, self.cinf, self._x[chnk_idx], self._y[chnk_idx], self._z[chnk_idx], chunk_w, self.type, self.zw, self.varw, self.ww) |
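The chunk boundaries fed to the jit-compiled function follow the list comprehension above; with small illustrative numbers:

```python
# Illustration of the chunk slices numba_add builds (values are illustrative).
ptlen, chnksize = 10, 4
chnks = [[i * chnksize, min((i + 1) * chnksize, ptlen)]
         for i in range(int(ptlen / chnksize) + 1)]
print(chnks)   # [[0, 4], [4, 8], [8, 10]]
```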
def gridTest(N=2, ProfileON=False):
' Method to test gridding.'
print(('N=%d' % N))
x = (np.random.random((N, 1)) * 100)
y = (np.random.random((N, 1)) * 100)
z = np.exp((np.sqrt((((x - 50.0) ** 2) + ((y - 50.0) ** 2))) / 50))
G = vgrid(1, 1, 'mean')
if ProfileON:
print('Profiling on.')
lp = LineProfiler()
GAddProfiled = lp(G.add)
lp.add_function(G.mean)
GAddProfiled(x, y, z, 1)
return (G, lp)
else:
G.add(x, y, z, 1)
return G | 2,034,815,446,680,505,300 | Method to test gridding. | VGRID/vgrid.py | gridTest | valschmidt/vgrid | python | def gridTest(N=2, ProfileON=False):
' '
print(('N=%d' % N))
x = (np.random.random((N, 1)) * 100)
y = (np.random.random((N, 1)) * 100)
z = np.exp((np.sqrt((((x - 50.0) ** 2) + ((y - 50.0) ** 2))) / 50))
G = vgrid(1, 1, 'mean')
if ProfileON:
print('Profiling on.')
lp = LineProfiler()
GAddProfiled = lp(G.add)
lp.add_function(G.mean)
GAddProfiled(x, y, z, 1)
return (G, lp)
else:
G.add(x, y, z, 1)
return G |
def setup_platform(hass, config, add_entities, discovery_info=None):
'Set up the Samsung TV platform.'
known_devices = hass.data.get(KNOWN_DEVICES_KEY)
if (known_devices is None):
known_devices = set()
hass.data[KNOWN_DEVICES_KEY] = known_devices
uuid = None
if (config.get(CONF_HOST) is not None):
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
mac = config.get(CONF_MAC)
timeout = config.get(CONF_TIMEOUT)
elif (discovery_info is not None):
tv_name = discovery_info.get('name')
model = discovery_info.get('model_name')
host = discovery_info.get('host')
name = '{} ({})'.format(tv_name, model)
port = DEFAULT_PORT
timeout = DEFAULT_TIMEOUT
mac = None
udn = discovery_info.get('udn')
if (udn and udn.startswith('uuid:')):
uuid = udn[len('uuid:'):]
else:
_LOGGER.warning('Cannot determine device')
return
ip_addr = socket.gethostbyname(host)
if (ip_addr not in known_devices):
known_devices.add(ip_addr)
add_entities([SamsungTVDevice(host, port, name, timeout, mac, uuid)])
_LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name)
else:
_LOGGER.info('Ignoring duplicate Samsung TV %s:%d', host, port) | 7,478,801,825,985,793,000 | Set up the Samsung TV platform. | homeassistant/components/samsungtv/media_player.py | setup_platform | MagicalTrev89/home-assistant | python | def setup_platform(hass, config, add_entities, discovery_info=None):
known_devices = hass.data.get(KNOWN_DEVICES_KEY)
if (known_devices is None):
known_devices = set()
hass.data[KNOWN_DEVICES_KEY] = known_devices
uuid = None
if (config.get(CONF_HOST) is not None):
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
mac = config.get(CONF_MAC)
timeout = config.get(CONF_TIMEOUT)
elif (discovery_info is not None):
tv_name = discovery_info.get('name')
model = discovery_info.get('model_name')
host = discovery_info.get('host')
name = '{} ({})'.format(tv_name, model)
port = DEFAULT_PORT
timeout = DEFAULT_TIMEOUT
mac = None
udn = discovery_info.get('udn')
if (udn and udn.startswith('uuid:')):
uuid = udn[len('uuid:'):]
else:
_LOGGER.warning('Cannot determine device')
return
ip_addr = socket.gethostbyname(host)
if (ip_addr not in known_devices):
known_devices.add(ip_addr)
add_entities([SamsungTVDevice(host, port, name, timeout, mac, uuid)])
_LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name)
else:
_LOGGER.info('Ignoring duplicate Samsung TV %s:%d', host, port) |
def __init__(self, host, port, name, timeout, mac, uuid):
'Initialize the Samsung device.'
from samsungctl import exceptions
from samsungctl import Remote
import wakeonlan
self._exceptions_class = exceptions
self._remote_class = Remote
self._name = name
self._mac = mac
self._uuid = uuid
self._wol = wakeonlan
self._muted = False
self._playing = True
self._state = None
self._remote = None
self._end_of_power_off = None
self._config = {'name': 'HomeAssistant', 'description': name, 'id': 'ha.component.samsung', 'port': port, 'host': host, 'timeout': timeout}
if (self._config['port'] in (8001, 8002)):
self._config['method'] = 'websocket'
else:
self._config['method'] = 'legacy' | 5,848,817,460,881,256,000 | Initialize the Samsung device. | homeassistant/components/samsungtv/media_player.py | __init__ | MagicalTrev89/home-assistant | python | def __init__(self, host, port, name, timeout, mac, uuid):
from samsungctl import exceptions
from samsungctl import Remote
import wakeonlan
self._exceptions_class = exceptions
self._remote_class = Remote
self._name = name
self._mac = mac
self._uuid = uuid
self._wol = wakeonlan
self._muted = False
self._playing = True
self._state = None
self._remote = None
self._end_of_power_off = None
self._config = {'name': 'HomeAssistant', 'description': name, 'id': 'ha.component.samsung', 'port': port, 'host': host, 'timeout': timeout}
if (self._config['port'] in (8001, 8002)):
self._config['method'] = 'websocket'
else:
self._config['method'] = 'legacy' |
def update(self):
'Update state of device.'
self.send_key('KEY') | -2,328,262,338,321,575,400 | Update state of device. | homeassistant/components/samsungtv/media_player.py | update | MagicalTrev89/home-assistant | python | def update(self):
self.send_key('KEY') |
def get_remote(self):
'Create or return a remote control instance.'
if (self._remote is None):
self._remote = self._remote_class(self._config)
return self._remote | 6,487,959,911,410,992,000 | Create or return a remote control instance. | homeassistant/components/samsungtv/media_player.py | get_remote | MagicalTrev89/home-assistant | python | def get_remote(self):
if (self._remote is None):
self._remote = self._remote_class(self._config)
return self._remote |
def send_key(self, key):
'Send a key to the tv and handles exceptions.'
if (self._power_off_in_progress() and (key not in ('KEY_POWER', 'KEY_POWEROFF'))):
_LOGGER.info('TV is powering off, not sending command: %s', key)
return
try:
retry_count = 1
for _ in range((retry_count + 1)):
try:
self.get_remote().control(key)
break
except (self._exceptions_class.ConnectionClosed, BrokenPipeError):
self._remote = None
self._state = STATE_ON
except (self._exceptions_class.UnhandledResponse, self._exceptions_class.AccessDenied):
self._state = STATE_ON
self._remote = None
_LOGGER.debug('Failed sending command %s', key, exc_info=True)
return
except OSError:
self._state = STATE_OFF
self._remote = None
if self._power_off_in_progress():
self._state = STATE_OFF | -9,098,840,057,020,562,000 | Send a key to the tv and handle exceptions. | homeassistant/components/samsungtv/media_player.py | send_key | MagicalTrev89/home-assistant | python | def send_key(self, key):
if (self._power_off_in_progress() and (key not in ('KEY_POWER', 'KEY_POWEROFF'))):
_LOGGER.info('TV is powering off, not sending command: %s', key)
return
try:
retry_count = 1
for _ in range((retry_count + 1)):
try:
self.get_remote().control(key)
break
except (self._exceptions_class.ConnectionClosed, BrokenPipeError):
self._remote = None
self._state = STATE_ON
except (self._exceptions_class.UnhandledResponse, self._exceptions_class.AccessDenied):
self._state = STATE_ON
self._remote = None
_LOGGER.debug('Failed sending command %s', key, exc_info=True)
return
except OSError:
self._state = STATE_OFF
self._remote = None
if self._power_off_in_progress():
self._state = STATE_OFF |
@property
def unique_id(self) -> str:
'Return the unique ID of the device.'
return self._uuid | 1,727,077,770,470,627,600 | Return the unique ID of the device. | homeassistant/components/samsungtv/media_player.py | unique_id | MagicalTrev89/home-assistant | python | @property
def unique_id(self) -> str:
return self._uuid |
@property
def name(self):
'Return the name of the device.'
return self._name | -4,231,536,673,663,769,600 | Return the name of the device. | homeassistant/components/samsungtv/media_player.py | name | MagicalTrev89/home-assistant | python | @property
def name(self):
return self._name |
@property
def state(self):
'Return the state of the device.'
return self._state | -1,086,931,682,847,915,500 | Return the state of the device. | homeassistant/components/samsungtv/media_player.py | state | MagicalTrev89/home-assistant | python | @property
def state(self):
return self._state |
@property
def is_volume_muted(self):
'Boolean if volume is currently muted.'
return self._muted | 7,300,793,638,546,656,000 | Boolean if volume is currently muted. | homeassistant/components/samsungtv/media_player.py | is_volume_muted | MagicalTrev89/home-assistant | python | @property
def is_volume_muted(self):
return self._muted |
@property
def source_list(self):
'List of available input sources.'
return list(SOURCES) | -9,049,588,076,648,536,000 | List of available input sources. | homeassistant/components/samsungtv/media_player.py | source_list | MagicalTrev89/home-assistant | python | @property
def source_list(self):
return list(SOURCES) |
@property
def supported_features(self):
'Flag media player features that are supported.'
if self._mac:
return (SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON)
return SUPPORT_SAMSUNGTV | 7,980,298,372,656,887,000 | Flag media player features that are supported. | homeassistant/components/samsungtv/media_player.py | supported_features | MagicalTrev89/home-assistant | python | @property
def supported_features(self):
if self._mac:
return (SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON)
return SUPPORT_SAMSUNGTV |
def turn_off(self):
'Turn off media player.'
self._end_of_power_off = (dt_util.utcnow() + timedelta(seconds=15))
if (self._config['method'] == 'websocket'):
self.send_key('KEY_POWER')
else:
self.send_key('KEY_POWEROFF')
try:
self.get_remote().close()
self._remote = None
except OSError:
_LOGGER.debug('Could not establish connection.') | 4,279,632,299,341,431,000 | Turn off media player. | homeassistant/components/samsungtv/media_player.py | turn_off | MagicalTrev89/home-assistant | python | def turn_off(self):
self._end_of_power_off = (dt_util.utcnow() + timedelta(seconds=15))
if (self._config['method'] == 'websocket'):
self.send_key('KEY_POWER')
else:
self.send_key('KEY_POWEROFF')
try:
self.get_remote().close()
self._remote = None
except OSError:
_LOGGER.debug('Could not establish connection.') |
def volume_up(self):
'Volume up the media player.'
self.send_key('KEY_VOLUP') | 559,289,289,374,248,450 | Volume up the media player. | homeassistant/components/samsungtv/media_player.py | volume_up | MagicalTrev89/home-assistant | python | def volume_up(self):
self.send_key('KEY_VOLUP') |
def volume_down(self):
'Volume down media player.'
self.send_key('KEY_VOLDOWN') | 7,823,773,795,804,483,000 | Volume down media player. | homeassistant/components/samsungtv/media_player.py | volume_down | MagicalTrev89/home-assistant | python | def volume_down(self):
self.send_key('KEY_VOLDOWN') |
def mute_volume(self, mute):
'Send mute command.'
self.send_key('KEY_MUTE') | -5,766,217,316,642,036,000 | Send mute command. | homeassistant/components/samsungtv/media_player.py | mute_volume | MagicalTrev89/home-assistant | python | def mute_volume(self, mute):
self.send_key('KEY_MUTE') |
def media_play_pause(self):
'Simulate play pause media player.'
if self._playing:
self.media_pause()
else:
self.media_play() | 5,424,839,084,411,945,000 | Simulate play pause media player. | homeassistant/components/samsungtv/media_player.py | media_play_pause | MagicalTrev89/home-assistant | python | def media_play_pause(self):
if self._playing:
self.media_pause()
else:
self.media_play() |
def media_play(self):
'Send play command.'
self._playing = True
self.send_key('KEY_PLAY') | -4,624,043,560,243,361,000 | Send play command. | homeassistant/components/samsungtv/media_player.py | media_play | MagicalTrev89/home-assistant | python | def media_play(self):
self._playing = True
self.send_key('KEY_PLAY') |
def media_pause(self):
'Send media pause command to media player.'
self._playing = False
self.send_key('KEY_PAUSE') | -4,419,400,526,847,819,000 | Send media pause command to media player. | homeassistant/components/samsungtv/media_player.py | media_pause | MagicalTrev89/home-assistant | python | def media_pause(self):
self._playing = False
self.send_key('KEY_PAUSE') |
def media_next_track(self):
'Send next track command.'
self.send_key('KEY_FF') | 7,350,569,723,410,886,000 | Send next track command. | homeassistant/components/samsungtv/media_player.py | media_next_track | MagicalTrev89/home-assistant | python | def media_next_track(self):
self.send_key('KEY_FF') |
def media_previous_track(self):
'Send the previous track command.'
self.send_key('KEY_REWIND') | -6,217,111,541,976,905,000 | Send the previous track command. | homeassistant/components/samsungtv/media_player.py | media_previous_track | MagicalTrev89/home-assistant | python | def media_previous_track(self):
self.send_key('KEY_REWIND') |
async def async_play_media(self, media_type, media_id, **kwargs):
'Support changing a channel.'
if (media_type != MEDIA_TYPE_CHANNEL):
_LOGGER.error('Unsupported media type')
return
try:
cv.positive_int(media_id)
except vol.Invalid:
_LOGGER.error('Media ID must be positive integer')
return
for digit in media_id:
(await self.hass.async_add_job(self.send_key, ('KEY_' + digit)))
(await asyncio.sleep(KEY_PRESS_TIMEOUT, self.hass.loop))
(await self.hass.async_add_job(self.send_key, 'KEY_ENTER')) | 922,428,579,967,757,400 | Support changing a channel. | homeassistant/components/samsungtv/media_player.py | async_play_media | MagicalTrev89/home-assistant | python | async def async_play_media(self, media_type, media_id, **kwargs):
if (media_type != MEDIA_TYPE_CHANNEL):
_LOGGER.error('Unsupported media type')
return
try:
cv.positive_int(media_id)
except vol.Invalid:
_LOGGER.error('Media ID must be positive integer')
return
for digit in media_id:
(await self.hass.async_add_job(self.send_key, ('KEY_' + digit)))
(await asyncio.sleep(KEY_PRESS_TIMEOUT, self.hass.loop))
(await self.hass.async_add_job(self.send_key, 'KEY_ENTER')) |
def turn_on(self):
'Turn the media player on.'
if self._mac:
self._wol.send_magic_packet(self._mac)
else:
self.send_key('KEY_POWERON') | -8,216,487,931,362,533,000 | Turn the media player on. | homeassistant/components/samsungtv/media_player.py | turn_on | MagicalTrev89/home-assistant | python | def turn_on(self):
if self._mac:
self._wol.send_magic_packet(self._mac)
else:
self.send_key('KEY_POWERON') |
async def async_select_source(self, source):
'Select input source.'
if (source not in SOURCES):
_LOGGER.error('Unsupported source')
return
(await self.hass.async_add_job(self.send_key, SOURCES[source])) | 2,872,646,657,564,179,500 | Select input source. | homeassistant/components/samsungtv/media_player.py | async_select_source | MagicalTrev89/home-assistant | python | async def async_select_source(self, source):
if (source not in SOURCES):
_LOGGER.error('Unsupported source')
return
(await self.hass.async_add_job(self.send_key, SOURCES[source])) |
def GenerateConfig(context):
'Generate configuration.'
base_name = context.env['name']
instance = {'zone': context.properties['zone'], 'machineType': ZonalComputeUrl(context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': {'items': [{'key': 'gce-container-declaration', 'value': GenerateManifest(context)}]}, 'disks': [{'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': {'diskName': (base_name + '-disk'), 'sourceImage': GlobalComputeUrl('cos-cloud', 'images', context.properties['containerImage'])}}], 'networkInterfaces': [{'accessConfigs': [{'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT'}], 'network': GlobalComputeUrl(context.env['project'], 'networks', 'default')}], 'serviceAccounts': [{'email': 'default', 'scopes': ['https://www.googleapis.com/auth/logging.write']}]}
resources = {'resources': [{'name': base_name, 'type': 'compute.v1.instance', 'properties': instance}]}
return resources | -2,596,707,007,980,729,300 | Generate configuration. | templates/container_vm.py | GenerateConfig | AlexBulankou/dm-logbook-sample | python | def GenerateConfig(context):
base_name = context.env['name']
instance = {'zone': context.properties['zone'], 'machineType': ZonalComputeUrl(context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': {'items': [{'key': 'gce-container-declaration', 'value': GenerateManifest(context)}]}, 'disks': [{'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': {'diskName': (base_name + '-disk'), 'sourceImage': GlobalComputeUrl('cos-cloud', 'images', context.properties['containerImage'])}}], 'networkInterfaces': [{'accessConfigs': [{'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT'}], 'network': GlobalComputeUrl(context.env['project'], 'networks', 'default')}], 'serviceAccounts': [{'email': 'default', 'scopes': ['https://www.googleapis.com/auth/logging.write']}]}
resources = {'resources': [{'name': base_name, 'type': 'compute.v1.instance', 'properties': instance}]}
return resources |
def __init__(self, server_port=None):
'Initialize.\n\n :param server_port: Int. local port.\n '
self.server_port = server_port
self.logged_requests = {}
self.analysis = {'total_requests': 0, 'domains': set(), 'duration': 0} | -7,691,464,755,553,936,000 | Initialize.
:param server_port: Int. local port. | monitor_requests/data.py | __init__ | danpozmanter/monitor_requests | python | def __init__(self, server_port=None):
'Initialize.\n\n :param server_port: Int. local port.\n '
self.server_port = server_port
self.logged_requests = {}
self.analysis = {'total_requests': 0, 'domains': set(), 'duration': 0} |
def delete(self):
'Delete data from server if applicable.'
if (not self.server_port):
return
self._delete() | -1,416,033,866,173,989,000 | Delete data from server if applicable. | monitor_requests/data.py | delete | danpozmanter/monitor_requests | python | def delete(self):
if (not self.server_port):
return
self._delete() |
def log(self, url, domain, method, response, tb_list, duration):
'Log request, store traceback/response data and update counts.'
if self.server_port:
self._post({'url': url, 'domain': domain, 'method': method, 'response_content': str(response.content), 'response_status_code': response.status_code, 'duration': duration, 'traceback_list': tb_list})
else:
if (url not in self.logged_requests):
self.logged_requests[url] = {'count': 0, 'methods': set(), 'tracebacks': set(), 'responses': set()}
self.logged_requests[url]['count'] += 1
self.logged_requests[url]['methods'].add(method)
self.logged_requests[url]['tracebacks'].add(tuple(tb_list))
self.logged_requests[url]['responses'].add((response.status_code, response.content))
self.analysis['duration'] += duration
self.analysis['total_requests'] += 1
self.analysis['domains'].add(domain) | 6,867,242,971,886,877,000 | Log request, store traceback/response data and update counts. | monitor_requests/data.py | log | danpozmanter/monitor_requests | python | def log(self, url, domain, method, response, tb_list, duration):
if self.server_port:
self._post({'url': url, 'domain': domain, 'method': method, 'response_content': str(response.content), 'response_status_code': response.status_code, 'duration': duration, 'traceback_list': tb_list})
else:
if (url not in self.logged_requests):
self.logged_requests[url] = {'count': 0, 'methods': set(), 'tracebacks': set(), 'responses': set()}
self.logged_requests[url]['count'] += 1
self.logged_requests[url]['methods'].add(method)
self.logged_requests[url]['tracebacks'].add(tuple(tb_list))
self.logged_requests[url]['responses'].add((response.status_code, response.content))
self.analysis['duration'] += duration
self.analysis['total_requests'] += 1
self.analysis['domains'].add(domain) |
def retrieve(self):
'Retrieve data from server or instance.'
if (not self.server_port):
return (self.logged_requests, self.analysis)
data = self._get()
return (data.get('logged_requests'), data.get('analysis')) | 9,150,144,056,110,489,000 | Retrieve data from server or instance. | monitor_requests/data.py | retrieve | danpozmanter/monitor_requests | python | def retrieve(self):
if (not self.server_port):
return (self.logged_requests, self.analysis)
data = self._get()
return (data.get('logged_requests'), data.get('analysis')) |
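A minimal usage sketch tying together the __init__, log, retrieve and delete methods shown in the rows above. The owning class name does not appear in these rows, so importing Data from monitor_requests.data is an assumption, and the response argument is a small stand-in carrying only the content and status_code attributes that log() reads.

from collections import namedtuple
from monitor_requests.data import Data  # assumed class name / import path, matching monitor_requests/data.py above

FakeResponse = namedtuple('FakeResponse', ['content', 'status_code'])

collector = Data()  # no server_port, so data stays in this process
collector.log(
    url='https://api.example.com/items',
    domain='api.example.com',
    method='GET',
    response=FakeResponse(content=b'{"ok": true}', status_code=200),
    tb_list=['<fake traceback frame>'],
    duration=0.12,
)
logged_requests, analysis = collector.retrieve()
print(analysis['total_requests'], sorted(analysis['domains']))
collector.delete()  # no-op here because no server_port was configured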
def has_perm(self, perm, obj=None):
'Does the user have a specific permission?'
return True | -9,084,859,824,158,067,000 | Does the user have a specific permission? | dentalapp-backend/dentalapp/userauth/models.py | has_perm | PavelescuVictor/DentalApplication | python | def has_perm(self, perm, obj=None):
return True |
def has_module_perms(self, app_label):
'Does the user have permissions to view the app `app_label`?'
return True | 4,992,969,413,468,943,000 | Does the user have permissions to view the app `app_label`? | dentalapp-backend/dentalapp/userauth/models.py | has_module_perms | PavelescuVictor/DentalApplication | python | def has_module_perms(self, app_label):
return True |
def get_args():
' Get args from stdin.\n\n The common options are defined in the object\n libs.nnet3.train.common.CommonParser.parser.\n See steps/libs/nnet3/train/common.py\n '
parser = argparse.ArgumentParser(description='Trains a feed forward raw DNN (without transition model)\n using frame-level objectives like cross-entropy and mean-squared-error.\n DNNs include simple DNNs, TDNNs and CNNs.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, conflict_handler='resolve', parents=[common_train_lib.CommonParser(include_chunk_context=False).parser])
parser.add_argument('--egs.frames-per-eg', type=int, dest='frames_per_eg', default=8, help='Number of output labels per example')
parser.add_argument('--image.augmentation-opts', type=str, dest='image_augmentation_opts', default=None, help='Image augmentation options')
parser.add_argument('--trainer.input-model', type=str, dest='input_model', default=None, action=common_lib.NullstrToNoneAction, help='If specified, this model is used as initial\n raw model (0.raw in the script) instead of initializing\n the model from xconfig. Configs dir is not expected to\n exist and left/right context is computed from this\n model.')
parser.add_argument('--trainer.prior-subset-size', type=int, dest='prior_subset_size', default=20000, help='Number of samples for computing priors')
parser.add_argument('--trainer.num-jobs-compute-prior', type=int, dest='num_jobs_compute_prior', default=10, help='The prior computation jobs are single threaded and run on the CPU')
parser.add_argument('--trainer.optimization.minibatch-size', type=str, dest='minibatch_size', default='512', help='Size of the minibatch used in SGD training\n (argument to nnet3-merge-egs); may be a more general\n rule as accepted by the --minibatch-size option of\n nnet3-merge-egs; run that program without args to see\n the format.')
parser.add_argument('--compute-average-posteriors', type=str, action=common_lib.StrToBoolAction, choices=['true', 'false'], default=False, help='If true, then the average output of the\n network is computed and dumped as post.final.vec')
parser.add_argument('--nj', type=int, default=4, help='Number of parallel jobs')
parser.add_argument('--use-dense-targets', type=str, action=common_lib.StrToBoolAction, default=True, choices=['true', 'false'], help='Train neural network using dense targets')
parser.add_argument('--feat-dir', type=str, required=False, help='Directory with features used for training the neural network.')
parser.add_argument('--targets-scp', type=str, required=False, help='Targets for training neural network.\n This is a kaldi-format SCP file of target matrices.\n <utterance-id> <extended-filename-of-target-matrix>.\n The target matrix\'s column dim must match \n the neural network output dim, and the\n row dim must match the number of output frames \n i.e. after subsampling if "--frame-subsampling-factor" \n option is passed to --egs.opts.')
parser.add_argument('--vad-egs', type=str, action=common_lib.StrToBoolAction, default=False, choices=['true', 'false'], help='Get nnet3 egs with vad applied on features.')
parser.add_argument('--dir', type=str, required=True, help='Directory to store the models and all other files.')
print(' '.join(sys.argv))
print(sys.argv)
args = parser.parse_args()
[args, run_opts] = process_args(args)
return [args, run_opts] | 3,313,513,404,749,398,000 | Get args from stdin.
The common options are defined in the object
libs.nnet3.train.common.CommonParser.parser.
See steps/libs/nnet3/train/common.py | egs/wsj/s5/steps/nnet3/train_raw_dnn.py | get_args | iezhanqingran/kaldi | python | def get_args():
' Get args from stdin.\n\n The common options are defined in the object\n libs.nnet3.train.common.CommonParser.parser.\n See steps/libs/nnet3/train/common.py\n '
parser = argparse.ArgumentParser(description='Trains a feed forward raw DNN (without transition model)\n using frame-level objectives like cross-entropy and mean-squared-error.\n DNNs include simple DNNs, TDNNs and CNNs.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, conflict_handler='resolve', parents=[common_train_lib.CommonParser(include_chunk_context=False).parser])
parser.add_argument('--egs.frames-per-eg', type=int, dest='frames_per_eg', default=8, help='Number of output labels per example')
parser.add_argument('--image.augmentation-opts', type=str, dest='image_augmentation_opts', default=None, help='Image augmentation options')
parser.add_argument('--trainer.input-model', type=str, dest='input_model', default=None, action=common_lib.NullstrToNoneAction, help='If specified, this model is used as initial\n raw model (0.raw in the script) instead of initializing\n the model from xconfig. Configs dir is not expected to\n exist and left/right context is computed from this\n model.')
parser.add_argument('--trainer.prior-subset-size', type=int, dest='prior_subset_size', default=20000, help='Number of samples for computing priors')
parser.add_argument('--trainer.num-jobs-compute-prior', type=int, dest='num_jobs_compute_prior', default=10, help='The prior computation jobs are single threaded and run on the CPU')
parser.add_argument('--trainer.optimization.minibatch-size', type=str, dest='minibatch_size', default='512', help='Size of the minibatch used in SGD training\n (argument to nnet3-merge-egs); may be a more general\n rule as accepted by the --minibatch-size option of\n nnet3-merge-egs; run that program without args to see\n the format.')
parser.add_argument('--compute-average-posteriors', type=str, action=common_lib.StrToBoolAction, choices=['true', 'false'], default=False, help='If true, then the average output of the\n network is computed and dumped as post.final.vec')
parser.add_argument('--nj', type=int, default=4, help='Number of parallel jobs')
parser.add_argument('--use-dense-targets', type=str, action=common_lib.StrToBoolAction, default=True, choices=['true', 'false'], help='Train neural network using dense targets')
parser.add_argument('--feat-dir', type=str, required=False, help='Directory with features used for training the neural network.')
parser.add_argument('--targets-scp', type=str, required=False, help='Targets for training neural network.\n This is a kaldi-format SCP file of target matrices.\n <utterance-id> <extended-filename-of-target-matrix>.\n The target matrix\'s column dim must match \n the neural network output dim, and the\n row dim must match the number of output frames \n i.e. after subsampling if "--frame-subsampling-factor" \n option is passed to --egs.opts.')
parser.add_argument('--vad-egs', type=str, action=common_lib.StrToBoolAction, default=False, choices=['true', 'false'], help='Get nnet3 egs with vad applied on features.')
parser.add_argument('--dir', type=str, required=True, help='Directory to store the models and all other files.')
print(' '.join(sys.argv))
print(sys.argv)
args = parser.parse_args()
[args, run_opts] = process_args(args)
return [args, run_opts] |
def process_args(args):
' Process the options got from get_args()\n '
if (args.frames_per_eg < 1):
raise Exception('--egs.frames-per-eg should have a minimum value of 1')
if (not common_train_lib.validate_minibatch_size_str(args.minibatch_size)):
raise Exception('--trainer.optimization.minibatch-size has an invalid value')
if (not os.path.exists(args.dir)):
raise Exception('Directory specified with --dir={0} does not exist.'.format(args.dir))
if ((not os.path.exists((args.dir + '/configs'))) and ((args.input_model is None) or (not os.path.exists(args.input_model)))):
raise Exception('Either --trainer.input-model option should be supplied, and exist; or the {0}/configs directory should exist.{0}/configs is the output of make_configs.py'.format(args.dir))
run_opts = common_train_lib.RunOpts()
if (args.use_gpu in ['true', 'false']):
args.use_gpu = ('yes' if (args.use_gpu == 'true') else 'no')
if (args.use_gpu in ['yes', 'wait']):
if (not common_lib.check_if_cuda_compiled()):
logger.warning('You are running with one thread but you have not compiled\n for CUDA. You may be running a setup optimized for GPUs.\n If you have GPUs and have nvcc installed, go to src/ and do\n ./configure; make')
run_opts.train_queue_opt = '--gpu 1'
run_opts.parallel_train_opts = '--use-gpu={}'.format(args.use_gpu)
run_opts.combine_gpu_opt = '--use-gpu={}'.format(args.use_gpu)
run_opts.combine_queue_opt = '--gpu 1'
run_opts.prior_gpu_opt = '--use-gpu={}'.format(args.use_gpu)
run_opts.prior_queue_opt = '--gpu 1'
else:
logger.warning('Without using a GPU this will be very slow. nnet3 does not yet support multiple threads.')
run_opts.train_queue_opt = ''
run_opts.parallel_train_opts = '--use-gpu=no'
run_opts.combine_gpu_opt = '--use-gpu=no'
run_opts.combine_queue_opt = ''
run_opts.prior_gpu_opt = '--use-gpu=no'
run_opts.prior_queue_opt = ''
run_opts.command = args.command
run_opts.egs_command = (args.egs_command if (args.egs_command is not None) else args.command)
run_opts.num_jobs_compute_prior = args.num_jobs_compute_prior
return [args, run_opts] | 5,270,192,292,623,299,000 | Process the options got from get_args() | egs/wsj/s5/steps/nnet3/train_raw_dnn.py | process_args | iezhanqingran/kaldi | python | def process_args(args):
' \n '
if (args.frames_per_eg < 1):
raise Exception('--egs.frames-per-eg should have a minimum value of 1')
if (not common_train_lib.validate_minibatch_size_str(args.minibatch_size)):
raise Exception('--trainer.optimization.minibatch-size has an invalid value')
if (not os.path.exists(args.dir)):
raise Exception('Directory specified with --dir={0} does not exist.'.format(args.dir))
if ((not os.path.exists((args.dir + '/configs'))) and ((args.input_model is None) or (not os.path.exists(args.input_model)))):
raise Exception('Either --trainer.input-model option should be supplied, and exist; or the {0}/configs directory should exist.{0}/configs is the output of make_configs.py'.format(args.dir))
run_opts = common_train_lib.RunOpts()
if (args.use_gpu in ['true', 'false']):
args.use_gpu = ('yes' if (args.use_gpu == 'true') else 'no')
if (args.use_gpu in ['yes', 'wait']):
if (not common_lib.check_if_cuda_compiled()):
logger.warning('You are running with one thread but you have not compiled\n for CUDA. You may be running a setup optimized for GPUs.\n If you have GPUs and have nvcc installed, go to src/ and do\n ./configure; make')
run_opts.train_queue_opt = '--gpu 1'
run_opts.parallel_train_opts = '--use-gpu={}'.format(args.use_gpu)
run_opts.combine_gpu_opt = '--use-gpu={}'.format(args.use_gpu)
run_opts.combine_queue_opt = '--gpu 1'
run_opts.prior_gpu_opt = '--use-gpu={}'.format(args.use_gpu)
run_opts.prior_queue_opt = '--gpu 1'
else:
logger.warning('Without using a GPU this will be very slow. nnet3 does not yet support multiple threads.')
run_opts.train_queue_opt = ''
run_opts.parallel_train_opts = '--use-gpu=no'
run_opts.combine_gpu_opt = '--use-gpu=no'
run_opts.combine_queue_opt = ''
run_opts.prior_gpu_opt = '--use-gpu=no'
run_opts.prior_queue_opt = ''
run_opts.command = args.command
run_opts.egs_command = (args.egs_command if (args.egs_command is not None) else args.command)
run_opts.num_jobs_compute_prior = args.num_jobs_compute_prior
return [args, run_opts] |
def train(args, run_opts):
' The main function for training.\n\n Args:\n args: a Namespace object with the required parameters\n obtained from the function process_args()\n run_opts: RunOpts object obtained from the process_args()\n '
arg_string = pprint.pformat(vars(args))
logger.info('Arguments for the experiment\n{0}'.format(arg_string))
feat_dim = common_lib.get_feat_dim(args.feat_dir)
ivector_dim = common_lib.get_ivector_dim(args.online_ivector_dir)
ivector_id = common_lib.get_ivector_extractor_id(args.online_ivector_dir)
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
if (args.input_model is None):
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
variables = common_train_lib.parse_generic_config_vars_file(var_file)
else:
variables = common_train_lib.get_input_model_info(args.input_model)
try:
model_left_context = variables['model_left_context']
model_right_context = variables['model_right_context']
except KeyError as e:
raise Exception('KeyError {0}: Variables need to be defined in {1}'.format(str(e), '{0}/configs'.format(args.dir)))
left_context = model_left_context
right_context = model_right_context
if ((args.stage <= (- 4)) and os.path.exists((args.dir + '/configs/init.config')) and (args.input_model is None)):
logger.info('Initializing the network for computing the LDA stats')
common_lib.execute_command('{command} {dir}/log/nnet_init.log nnet3-init --srand=-2 {dir}/configs/init.config {dir}/init.raw'.format(command=run_opts.command, dir=args.dir))
default_egs_dir = '{0}/egs'.format(args.dir)
if ((args.stage <= (- 3)) and (args.egs_dir is None)):
if ((args.targets_scp is None) or (args.feat_dir is None)):
raise Exception("If you don't supply the --egs-dir option, the --targets-scp and --feat-dir options are required.")
logger.info('Generating egs')
if args.use_dense_targets:
target_type = 'dense'
try:
num_targets = int(variables['num_targets'])
if (common_lib.get_feat_dim_from_scp(args.targets_scp) != num_targets):
raise Exception('Mismatch between num-targets provided to script vs configs')
except KeyError as e:
num_targets = (- 1)
else:
target_type = 'sparse'
try:
num_targets = int(variables['num_targets'])
except KeyError as e:
raise Exception('KeyError {0}: Variables need to be defined in {1}'.format(str(e), '{0}/configs'.format(args.dir)))
train_lib.raw_model.generate_egs_using_targets(data=args.feat_dir, targets_scp=args.targets_scp, vad_egs=args.vad_egs, egs_dir=default_egs_dir, left_context=left_context, right_context=right_context, run_opts=run_opts, frames_per_eg_str=str(args.frames_per_eg), srand=args.srand, egs_opts=args.egs_opts, cmvn_opts=args.cmvn_opts, online_ivector_dir=args.online_ivector_dir, samples_per_iter=args.samples_per_iter, stage=args.egs_stage, target_type=target_type, num_targets=num_targets)
if (args.egs_dir is None):
egs_dir = default_egs_dir
else:
egs_dir = args.egs_dir
[egs_left_context, egs_right_context, frames_per_eg_str, num_archives] = common_train_lib.verify_egs_dir(egs_dir, feat_dim, ivector_dim, ivector_id, left_context, right_context)
assert (str(args.frames_per_eg) == frames_per_eg_str)
if (args.num_jobs_final > num_archives):
raise Exception('num_jobs_final cannot exceed the number of archives in the egs directory')
common_train_lib.copy_egs_properties_to_exp_dir(egs_dir, args.dir)
if ((args.stage <= (- 2)) and os.path.exists((args.dir + '/configs/init.config')) and (args.input_model is None)):
logger.info('Computing the preconditioning matrix for input features')
train_lib.common.compute_preconditioning_matrix(args.dir, egs_dir, num_archives, run_opts, max_lda_jobs=args.max_lda_jobs, rand_prune=args.rand_prune)
if (args.stage <= (- 2)):
logger.info('Computing initial vector for FixedScaleComponent before softmax, using priors^{prior_scale} and rescaling to average 1'.format(prior_scale=args.presoftmax_prior_scale_power))
counts_path = (os.path.dirname(args.targets_scp) + '/target_counts')
common_train_lib.compute_presoftmax_prior_scale_targets(args.dir, counts_path, presoftmax_prior_scale_power=args.presoftmax_prior_scale_power)
if (args.stage <= (- 1)):
logger.info('Preparing the initial network.')
common_train_lib.prepare_initial_network(args.dir, run_opts, args.srand, args.input_model)
num_archives_expanded = (num_archives * args.frames_per_eg)
num_archives_to_process = int((args.num_epochs * num_archives_expanded))
num_archives_processed = 0
num_iters = int(((num_archives_to_process * 2) / (args.num_jobs_initial + args.num_jobs_final)))
if args.do_final_combination:
models_to_combine = common_train_lib.get_model_combine_iters(num_iters, args.num_epochs, num_archives_expanded, args.max_models_combine, args.num_jobs_final)
else:
models_to_combine = None
if os.path.exists('{0}/valid_diagnostic.scp'.format(egs_dir)):
if os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir)):
raise Exception('both {0}/valid_diagnostic.egs and {0}/valid_diagnostic.scp exist.This script expects only one of them to exist.'.format(egs_dir))
use_multitask_egs = True
else:
if (not os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir))):
raise Exception('neither {0}/valid_diagnostic.egs nor {0}/valid_diagnostic.scp exist.This script expects one of them.'.format(egs_dir))
use_multitask_egs = False
logger.info('Training will run for {0} epochs = {1} iterations'.format(args.num_epochs, num_iters))
for iter in range(num_iters):
if ((args.exit_stage is not None) and (iter == args.exit_stage)):
logger.info('Exiting early due to --exit-stage {0}'.format(iter))
return
current_num_jobs = common_train_lib.get_current_num_jobs(iter, num_iters, args.num_jobs_initial, args.num_jobs_step, args.num_jobs_final)
if (args.stage <= iter):
lrate = common_train_lib.get_learning_rate(iter, current_num_jobs, num_iters, num_archives_processed, num_archives_to_process, args.initial_effective_lrate, args.final_effective_lrate)
shrinkage_value = (1.0 - (args.proportional_shrink * lrate))
if (shrinkage_value <= 0.5):
raise Exception('proportional-shrink={0} is too large, it gives shrink-value={1}'.format(args.proportional_shrink, shrinkage_value))
percent = ((num_archives_processed * 100.0) / num_archives_to_process)
epoch = ((num_archives_processed * args.num_epochs) / num_archives_to_process)
shrink_info_str = ''
if (shrinkage_value != 1.0):
shrink_info_str = 'shrink: {0:0.5f}'.format(shrinkage_value)
logger.info('Iter: {0}/{1} Jobs: {2} Epoch: {3:0.2f}/{4:0.1f} ({5:0.1f}% complete) lr: {6:0.6f} {7}'.format(iter, (num_iters - 1), current_num_jobs, epoch, args.num_epochs, percent, lrate, shrink_info_str))
train_lib.common.train_one_iteration(dir=args.dir, iter=iter, srand=args.srand, egs_dir=egs_dir, num_jobs=current_num_jobs, num_archives_processed=num_archives_processed, num_archives=num_archives, learning_rate=lrate, dropout_edit_string=common_train_lib.get_dropout_edit_string(args.dropout_schedule, (float(num_archives_processed) / num_archives_to_process), iter), train_opts=' '.join(args.train_opts), minibatch_size_str=args.minibatch_size, frames_per_eg=args.frames_per_eg, momentum=args.momentum, max_param_change=args.max_param_change, shrinkage_value=shrinkage_value, shuffle_buffer_size=args.shuffle_buffer_size, run_opts=run_opts, get_raw_nnet_from_am=False, image_augmentation_opts=args.image_augmentation_opts, use_multitask_egs=use_multitask_egs, backstitch_training_scale=args.backstitch_training_scale, backstitch_training_interval=args.backstitch_training_interval)
if args.cleanup:
common_train_lib.remove_model(args.dir, (iter - 2), num_iters, models_to_combine, args.preserve_model_interval, get_raw_nnet_from_am=False)
if (args.email is not None):
reporting_iter_interval = (num_iters * args.reporting_interval)
if ((iter % reporting_iter_interval) == 0):
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
message = report
subject = 'Update : Expt {dir} : Iter {iter}'.format(dir=args.dir, iter=iter)
common_lib.send_mail(message, subject, args.email)
num_archives_processed = (num_archives_processed + current_num_jobs)
if (args.stage <= num_iters):
if args.do_final_combination:
logger.info('Doing final combination to produce final.raw')
train_lib.common.combine_models(dir=args.dir, num_iters=num_iters, models_to_combine=models_to_combine, egs_dir=egs_dir, minibatch_size_str=args.minibatch_size, run_opts=run_opts, get_raw_nnet_from_am=False, max_objective_evaluations=args.max_objective_evaluations, use_multitask_egs=use_multitask_egs)
else:
common_lib.force_symlink('{0}.raw'.format(num_iters), '{0}/final.raw'.format(args.dir))
if (args.compute_average_posteriors and (args.stage <= (num_iters + 1))):
logger.info("Getting average posterior for output-node 'output'.")
train_lib.common.compute_average_posterior(dir=args.dir, iter='final', egs_dir=egs_dir, num_archives=num_archives, prior_subset_size=args.prior_subset_size, run_opts=run_opts, get_raw_nnet_from_am=False)
if args.cleanup:
logger.info('Cleaning up the experiment directory {0}'.format(args.dir))
remove_egs = args.remove_egs
if (args.egs_dir is not None):
remove_egs = False
common_train_lib.clean_nnet_dir(nnet_dir=args.dir, num_iters=num_iters, egs_dir=egs_dir, preserve_model_interval=args.preserve_model_interval, remove_egs=remove_egs, get_raw_nnet_from_am=False)
outputs_list = common_train_lib.get_outputs_list('{0}/final.raw'.format(args.dir), get_raw_nnet_from_am=False)
if ('output' in outputs_list):
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
if (args.email is not None):
common_lib.send_mail(report, 'Update : Expt {0} : complete'.format(args.dir), args.email)
with open('{dir}/accuracy.{output_name}.report'.format(dir=args.dir, output_name='output'), 'w') as f:
f.write(report)
common_lib.execute_command('steps/info/nnet3_dir_info.pl {0}'.format(args.dir)) | 2,651,687,824,782,216,700 | The main function for training.
Args:
args: a Namespace object with the required parameters
obtained from the function process_args()
run_opts: RunOpts object obtained from the process_args() | egs/wsj/s5/steps/nnet3/train_raw_dnn.py | train | iezhanqingran/kaldi | python | def train(args, run_opts):
' The main function for training.\n\n Args:\n args: a Namespace object with the required parameters\n obtained from the function process_args()\n run_opts: RunOpts object obtained from the process_args()\n '
arg_string = pprint.pformat(vars(args))
logger.info('Arguments for the experiment\n{0}'.format(arg_string))
feat_dim = common_lib.get_feat_dim(args.feat_dir)
ivector_dim = common_lib.get_ivector_dim(args.online_ivector_dir)
ivector_id = common_lib.get_ivector_extractor_id(args.online_ivector_dir)
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
if (args.input_model is None):
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
variables = common_train_lib.parse_generic_config_vars_file(var_file)
else:
variables = common_train_lib.get_input_model_info(args.input_model)
try:
model_left_context = variables['model_left_context']
model_right_context = variables['model_right_context']
except KeyError as e:
raise Exception('KeyError {0}: Variables need to be defined in {1}'.format(str(e), '{0}/configs'.format(args.dir)))
left_context = model_left_context
right_context = model_right_context
if ((args.stage <= (- 4)) and os.path.exists((args.dir + '/configs/init.config')) and (args.input_model is None)):
logger.info('Initializing the network for computing the LDA stats')
common_lib.execute_command('{command} {dir}/log/nnet_init.log nnet3-init --srand=-2 {dir}/configs/init.config {dir}/init.raw'.format(command=run_opts.command, dir=args.dir))
default_egs_dir = '{0}/egs'.format(args.dir)
if ((args.stage <= (- 3)) and (args.egs_dir is None)):
if ((args.targets_scp is None) or (args.feat_dir is None)):
raise Exception("If you don't supply the --egs-dir option, the --targets-scp and --feat-dir options are required.")
logger.info('Generating egs')
if args.use_dense_targets:
target_type = 'dense'
try:
num_targets = int(variables['num_targets'])
if (common_lib.get_feat_dim_from_scp(args.targets_scp) != num_targets):
raise Exception('Mismatch between num-targets provided to script vs configs')
except KeyError as e:
num_targets = (- 1)
else:
target_type = 'sparse'
try:
num_targets = int(variables['num_targets'])
except KeyError as e:
raise Exception('KeyError {0}: Variables need to be defined in {1}'.format(str(e), '{0}/configs'.format(args.dir)))
train_lib.raw_model.generate_egs_using_targets(data=args.feat_dir, targets_scp=args.targets_scp, vad_egs=args.vad_egs, egs_dir=default_egs_dir, left_context=left_context, right_context=right_context, run_opts=run_opts, frames_per_eg_str=str(args.frames_per_eg), srand=args.srand, egs_opts=args.egs_opts, cmvn_opts=args.cmvn_opts, online_ivector_dir=args.online_ivector_dir, samples_per_iter=args.samples_per_iter, stage=args.egs_stage, target_type=target_type, num_targets=num_targets)
if (args.egs_dir is None):
egs_dir = default_egs_dir
else:
egs_dir = args.egs_dir
[egs_left_context, egs_right_context, frames_per_eg_str, num_archives] = common_train_lib.verify_egs_dir(egs_dir, feat_dim, ivector_dim, ivector_id, left_context, right_context)
assert (str(args.frames_per_eg) == frames_per_eg_str)
if (args.num_jobs_final > num_archives):
raise Exception('num_jobs_final cannot exceed the number of archives in the egs directory')
common_train_lib.copy_egs_properties_to_exp_dir(egs_dir, args.dir)
if ((args.stage <= (- 2)) and os.path.exists((args.dir + '/configs/init.config')) and (args.input_model is None)):
logger.info('Computing the preconditioning matrix for input features')
train_lib.common.compute_preconditioning_matrix(args.dir, egs_dir, num_archives, run_opts, max_lda_jobs=args.max_lda_jobs, rand_prune=args.rand_prune)
if (args.stage <= (- 2)):
logger.info('Computing initial vector for FixedScaleComponent before softmax, using priors^{prior_scale} and rescaling to average 1'.format(prior_scale=args.presoftmax_prior_scale_power))
counts_path = (os.path.dirname(args.targets_scp) + '/target_counts')
common_train_lib.compute_presoftmax_prior_scale_targets(args.dir, counts_path, presoftmax_prior_scale_power=args.presoftmax_prior_scale_power)
if (args.stage <= (- 1)):
logger.info('Preparing the initial network.')
common_train_lib.prepare_initial_network(args.dir, run_opts, args.srand, args.input_model)
num_archives_expanded = (num_archives * args.frames_per_eg)
num_archives_to_process = int((args.num_epochs * num_archives_expanded))
num_archives_processed = 0
num_iters = int(((num_archives_to_process * 2) / (args.num_jobs_initial + args.num_jobs_final)))
if args.do_final_combination:
models_to_combine = common_train_lib.get_model_combine_iters(num_iters, args.num_epochs, num_archives_expanded, args.max_models_combine, args.num_jobs_final)
else:
models_to_combine = None
if os.path.exists('{0}/valid_diagnostic.scp'.format(egs_dir)):
if os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir)):
raise Exception('both {0}/valid_diagnostic.egs and {0}/valid_diagnostic.scp exist.This script expects only one of them to exist.'.format(egs_dir))
use_multitask_egs = True
else:
if (not os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir))):
raise Exception('neither {0}/valid_diagnostic.egs nor {0}/valid_diagnostic.scp exist.This script expects one of them.'.format(egs_dir))
use_multitask_egs = False
logger.info('Training will run for {0} epochs = {1} iterations'.format(args.num_epochs, num_iters))
for iter in range(num_iters):
if ((args.exit_stage is not None) and (iter == args.exit_stage)):
logger.info('Exiting early due to --exit-stage {0}'.format(iter))
return
current_num_jobs = common_train_lib.get_current_num_jobs(iter, num_iters, args.num_jobs_initial, args.num_jobs_step, args.num_jobs_final)
if (args.stage <= iter):
lrate = common_train_lib.get_learning_rate(iter, current_num_jobs, num_iters, num_archives_processed, num_archives_to_process, args.initial_effective_lrate, args.final_effective_lrate)
shrinkage_value = (1.0 - (args.proportional_shrink * lrate))
if (shrinkage_value <= 0.5):
raise Exception('proportional-shrink={0} is too large, it gives shrink-value={1}'.format(args.proportional_shrink, shrinkage_value))
percent = ((num_archives_processed * 100.0) / num_archives_to_process)
epoch = ((num_archives_processed * args.num_epochs) / num_archives_to_process)
shrink_info_str = ''
if (shrinkage_value != 1.0):
shrink_info_str = 'shrink: {0:0.5f}'.format(shrinkage_value)
logger.info('Iter: {0}/{1} Jobs: {2} Epoch: {3:0.2f}/{4:0.1f} ({5:0.1f}% complete) lr: {6:0.6f} {7}'.format(iter, (num_iters - 1), current_num_jobs, epoch, args.num_epochs, percent, lrate, shrink_info_str))
train_lib.common.train_one_iteration(dir=args.dir, iter=iter, srand=args.srand, egs_dir=egs_dir, num_jobs=current_num_jobs, num_archives_processed=num_archives_processed, num_archives=num_archives, learning_rate=lrate, dropout_edit_string=common_train_lib.get_dropout_edit_string(args.dropout_schedule, (float(num_archives_processed) / num_archives_to_process), iter), train_opts=' '.join(args.train_opts), minibatch_size_str=args.minibatch_size, frames_per_eg=args.frames_per_eg, momentum=args.momentum, max_param_change=args.max_param_change, shrinkage_value=shrinkage_value, shuffle_buffer_size=args.shuffle_buffer_size, run_opts=run_opts, get_raw_nnet_from_am=False, image_augmentation_opts=args.image_augmentation_opts, use_multitask_egs=use_multitask_egs, backstitch_training_scale=args.backstitch_training_scale, backstitch_training_interval=args.backstitch_training_interval)
if args.cleanup:
common_train_lib.remove_model(args.dir, (iter - 2), num_iters, models_to_combine, args.preserve_model_interval, get_raw_nnet_from_am=False)
if (args.email is not None):
reporting_iter_interval = (num_iters * args.reporting_interval)
if ((iter % reporting_iter_interval) == 0):
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
message = report
subject = 'Update : Expt {dir} : Iter {iter}'.format(dir=args.dir, iter=iter)
common_lib.send_mail(message, subject, args.email)
num_archives_processed = (num_archives_processed + current_num_jobs)
if (args.stage <= num_iters):
if args.do_final_combination:
logger.info('Doing final combination to produce final.raw')
train_lib.common.combine_models(dir=args.dir, num_iters=num_iters, models_to_combine=models_to_combine, egs_dir=egs_dir, minibatch_size_str=args.minibatch_size, run_opts=run_opts, get_raw_nnet_from_am=False, max_objective_evaluations=args.max_objective_evaluations, use_multitask_egs=use_multitask_egs)
else:
common_lib.force_symlink('{0}.raw'.format(num_iters), '{0}/final.raw'.format(args.dir))
if (args.compute_average_posteriors and (args.stage <= (num_iters + 1))):
logger.info("Getting average posterior for output-node 'output'.")
train_lib.common.compute_average_posterior(dir=args.dir, iter='final', egs_dir=egs_dir, num_archives=num_archives, prior_subset_size=args.prior_subset_size, run_opts=run_opts, get_raw_nnet_from_am=False)
if args.cleanup:
logger.info('Cleaning up the experiment directory {0}'.format(args.dir))
remove_egs = args.remove_egs
if (args.egs_dir is not None):
remove_egs = False
common_train_lib.clean_nnet_dir(nnet_dir=args.dir, num_iters=num_iters, egs_dir=egs_dir, preserve_model_interval=args.preserve_model_interval, remove_egs=remove_egs, get_raw_nnet_from_am=False)
outputs_list = common_train_lib.get_outputs_list('{0}/final.raw'.format(args.dir), get_raw_nnet_from_am=False)
if ('output' in outputs_list):
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
if (args.email is not None):
common_lib.send_mail(report, 'Update : Expt {0} : complete'.format(args.dir), args.email)
with open('{dir}/accuracy.{output_name}.report'.format(dir=args.dir, output_name='output'), 'w') as f:
f.write(report)
common_lib.execute_command('steps/info/nnet3_dir_info.pl {0}'.format(args.dir)) |
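The number of training iterations in train() above follows a simple relation between epochs, archives and job counts; the snippet below reproduces that arithmetic with invented example values.

# Worked example of the schedule arithmetic used by train() above; all numbers are invented.
num_archives = 12        # archives of training examples produced by the egs stage
frames_per_eg = 8
num_epochs = 3
num_jobs_initial = 2
num_jobs_final = 6

num_archives_expanded = num_archives * frames_per_eg                 # 96
num_archives_to_process = int(num_epochs * num_archives_expanded)    # 288
num_iters = int(num_archives_to_process * 2 / (num_jobs_initial + num_jobs_final))
print(num_iters)  # 72 iterations for this configuration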
def mock_get_activity_streams(streams_file):
"\n @TODO: I needed to mock the behavior the `stravalib.client.get_activity_streams`,\n it isn't the best alternative for mock the request from strava by passing a json file.\n "
stream_mock = MockResponse(streams_file).json()
entities = {}
for (key, value) in stream_mock.items():
value['type'] = key
stream = Stream.deserialize(value)
entities[stream.type] = stream
return entities | -8,311,206,817,446,089,000 | @TODO: I needed to mock the behavior of `stravalib.client.get_activity_streams`;
it isn't the best alternative for mocking the request from Strava by passing a JSON file. | runpandas/tests/test_strava_parser.py | mock_get_activity_streams | bitner/runpandas | python | def mock_get_activity_streams(streams_file):
"\n @TODO: I needed to mock the behavior of `stravalib.client.get_activity_streams`;\n it isn't the best alternative for mocking the request from Strava by passing a JSON file.\n "
stream_mock = MockResponse(streams_file).json()
entities = {}
for (key, value) in stream_mock.items():
value['type'] = key
stream = Stream.deserialize(value)
entities[stream.type] = stream
return entities |
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_collate_checkpoint(tmpdir):
'Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file.'
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, 'model.pt')
checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
trainer.strategy.barrier()
if trainer.is_global_zero:
output_path = os.path.join(tmpdir, 'single_model.pt')
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)
_assert_checkpoint_equal(model, output_path) | 6,974,235,595,776,155,000 | Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file. | tests/utilities/test_deepspeed_collate_checkpoint.py | test_deepspeed_collate_checkpoint | Borda/pytorch-lightning | python | @RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_collate_checkpoint(tmpdir):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, 'model.pt')
checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
trainer.strategy.barrier()
if trainer.is_global_zero:
output_path = os.path.join(tmpdir, 'single_model.pt')
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)
_assert_checkpoint_equal(model, output_path) |
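The collation step exercised by the test above can also be run on its own; the sketch below assumes a sharded DeepSpeed ZeRO stage-3 checkpoint saved by trainer.save_checkpoint(), with placeholder paths.

import torch
from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict

checkpoint_dir = 'lightning_logs/version_0/checkpoints/model.pt'  # placeholder: sharded checkpoint directory
single_file = 'collated_model.pt'                                 # placeholder: output path

# Collate the ZeRO stage-3 shards into a single fp32 state dict on disk
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, single_file)

# The collated file loads like an ordinary checkpoint
state = torch.load(single_file, map_location='cpu')
print(sorted(state.keys()))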
@property
def ExperimenterData(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('experimenterData') | 3,597,934,062,364,696,000 | NOT DEFINED
Returns:
str | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | ExperimenterData | kakkotetsu/IxNetwork | python | @property
def ExperimenterData(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('experimenterData') |
@property
def ExperimenterDataLength(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tnumber\n\t\t'
return self._get_attribute('experimenterDataLength') | -5,109,781,219,003,124,000 | NOT DEFINED
Returns:
number | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | ExperimenterDataLength | kakkotetsu/IxNetwork | python | @property
def ExperimenterDataLength(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tnumber\n\t\t'
return self._get_attribute('experimenterDataLength') |
@property
def ExperimenterId(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tnumber\n\t\t'
return self._get_attribute('experimenterId') | -575,094,733,073,153,400 | NOT DEFINED
Returns:
number | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | ExperimenterId | kakkotetsu/IxNetwork | python | @property
def ExperimenterId(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tnumber\n\t\t'
return self._get_attribute('experimenterId') |
@property
def NextTableIds(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('nextTableIds') | -2,984,752,472,315,336,700 | NOT DEFINED
Returns:
str | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | NextTableIds | kakkotetsu/IxNetwork | python | @property
def NextTableIds(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('nextTableIds') |
@property
def Property(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('property') | 8,491,170,648,330,608,000 | NOT DEFINED
Returns:
str | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | Property | kakkotetsu/IxNetwork | python | @property
def Property(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('property') |
@property
def SupportedField(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('supportedField') | -1,446,900,343,078,529,300 | NOT DEFINED
Returns:
str | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | SupportedField | kakkotetsu/IxNetwork | python | @property
def SupportedField(self):
'NOT DEFINED\n\n\t\tReturns:\n\t\t\tstr\n\t\t'
return self._get_attribute('supportedField') |
def find(self, ExperimenterData=None, ExperimenterDataLength=None, ExperimenterId=None, NextTableIds=None, Property=None, SupportedField=None):
'Finds and retrieves writeActionsMissLearnedInfo data from the server.\n\n\t\tAll named parameters support regex and can be used to selectively retrieve writeActionsMissLearnedInfo data from the server.\n\t\tBy default the find method takes no parameters and will retrieve all writeActionsMissLearnedInfo data from the server.\n\n\t\tArgs:\n\t\t\tExperimenterData (str): NOT DEFINED\n\t\t\tExperimenterDataLength (number): NOT DEFINED\n\t\t\tExperimenterId (number): NOT DEFINED\n\t\t\tNextTableIds (str): NOT DEFINED\n\t\t\tProperty (str): NOT DEFINED\n\t\t\tSupportedField (str): NOT DEFINED\n\n\t\tReturns:\n\t\t\tself: This instance with matching writeActionsMissLearnedInfo data retrieved from the server available through an iterator or index\n\n\t\tRaises:\n\t\t\tServerError: The server has encountered an uncategorized error condition\n\t\t'
return self._select(locals()) | -6,545,267,429,349,218,000 | Finds and retrieves writeActionsMissLearnedInfo data from the server.
All named parameters support regex and can be used to selectively retrieve writeActionsMissLearnedInfo data from the server.
By default the find method takes no parameters and will retrieve all writeActionsMissLearnedInfo data from the server.
Args:
ExperimenterData (str): NOT DEFINED
ExperimenterDataLength (number): NOT DEFINED
ExperimenterId (number): NOT DEFINED
NextTableIds (str): NOT DEFINED
Property (str): NOT DEFINED
SupportedField (str): NOT DEFINED
Returns:
self: This instance with matching writeActionsMissLearnedInfo data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | find | kakkotetsu/IxNetwork | python | def find(self, ExperimenterData=None, ExperimenterDataLength=None, ExperimenterId=None, NextTableIds=None, Property=None, SupportedField=None):
'Finds and retrieves writeActionsMissLearnedInfo data from the server.\n\n\t\tAll named parameters support regex and can be used to selectively retrieve writeActionsMissLearnedInfo data from the server.\n\t\tBy default the find method takes no parameters and will retrieve all writeActionsMissLearnedInfo data from the server.\n\n\t\tArgs:\n\t\t\tExperimenterData (str): NOT DEFINED\n\t\t\tExperimenterDataLength (number): NOT DEFINED\n\t\t\tExperimenterId (number): NOT DEFINED\n\t\t\tNextTableIds (str): NOT DEFINED\n\t\t\tProperty (str): NOT DEFINED\n\t\t\tSupportedField (str): NOT DEFINED\n\n\t\tReturns:\n\t\t\tself: This instance with matching writeActionsMissLearnedInfo data retrieved from the server available through an iterator or index\n\n\t\tRaises:\n\t\t\tServerError: The server has encountered an uncategorized error condition\n\t\t'
return self._select(locals()) |
def read(self, href):
'Retrieves a single instance of writeActionsMissLearnedInfo data from the server.\n\n\t\tArgs:\n\t\t\thref (str): An href to the instance to be retrieved\n\n\t\tReturns:\n\t\t\tself: This instance with the writeActionsMissLearnedInfo data from the server available through an iterator or index\n\n\t\tRaises:\n\t\t\tNotFoundError: The requested resource does not exist on the server\n\t\t\tServerError: The server has encountered an uncategorized error condition\n\t\t'
return self._read(href) | 3,049,726,136,629,737,500 | Retrieves a single instance of writeActionsMissLearnedInfo data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the writeActionsMissLearnedInfo data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | read | kakkotetsu/IxNetwork | python | def read(self, href):
'Retrieves a single instance of writeActionsMissLearnedInfo data from the server.\n\n\t\tArgs:\n\t\t\thref (str): An href to the instance to be retrieved\n\n\t\tReturns:\n\t\t\tself: This instance with the writeActionsMissLearnedInfo data from the server available through an iterator or index\n\n\t\tRaises:\n\t\t\tNotFoundError: The requested resource does not exist on the server\n\t\t\tServerError: The server has encountered an uncategorized error condition\n\t\t'
return self._read(href) |
def command_line_interface(root_path):
'\n A simple command-line interface for running a tool to resample a library of template spectra onto fixed\n logarithmic rasters representing each of the 4MOST arms.\n\n We use the python argparse module to build the interface, and return the inputs supplied by the user.\n\n :param root_path:\n The root path of this 4GP installation; the directory where we can find 4FS.\n\n :return:\n An object containing the arguments supplied by the user.\n '
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--templates-in', required=False, default='turbospec_rv_templates', dest='templates_in', help='Library of spectra to use as templates for RV code')
parser.add_argument('--workspace', dest='workspace', default='', help='Directory where we expect to find spectrum libraries')
parser.add_argument('--templates-out', required=False, default='rv_templates_resampled', dest='templates_out', help='Library into which to place resampled templates for RV code')
parser.add_argument('--binary-path', required=False, default=root_path, dest='binary_path', help='Specify a directory where 4FS binary package is installed')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s', datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info('Resampling template spectra')
return args | -1,154,686,518,924,430,000 | A simple command-line interface for running a tool to resample a library of template spectra onto fixed
logarithmic rasters representing each of the 4MOST arms.
We use the python argparse module to build the interface, and return the inputs supplied by the user.
:param root_path:
The root path of this 4GP installation; the directory where we can find 4FS.
:return:
An object containing the arguments supplied by the user. | src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py | command_line_interface | dcf21/4most-4gp | python | def command_line_interface(root_path):
'\n A simple command-line interface for running a tool to resample a library of template spectra onto fixed\n logarithmic rasters representing each of the 4MOST arms.\n\n We use the python argparse module to build the interface, and return the inputs supplied by the user.\n\n :param root_path:\n The root path of this 4GP installation; the directory where we can find 4FS.\n\n :return:\n An object containing the arguments supplied by the user.\n '
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--templates-in', required=False, default='turbospec_rv_templates', dest='templates_in', help='Library of spectra to use as templates for RV code')
parser.add_argument('--workspace', dest='workspace', default='', help='Directory where we expect to find spectrum libraries')
parser.add_argument('--templates-out', required=False, default='rv_templates_resampled', dest='templates_out', help='Library into which to place resampled templates for RV code')
parser.add_argument('--binary-path', required=False, default=root_path, dest='binary_path', help='Specify a directory where 4FS binary package is installed')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s', datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info('Resampling template spectra')
return args |
def logarithmic_raster(lambda_min, lambda_max, lambda_step):
'\n Create a logarithmic raster with a fixed logarithmic stride, based on a starting wavelength, finishing wavelength,\n and a mean wavelength step.\n\n :param lambda_min:\n Smallest wavelength in raster.\n :param lambda_max:\n Largest wavelength in raster.\n :param lambda_step:\n The approximate pixel size in the raster.\n :return:\n A numpy array containing a wavelength raster with fixed logarithmic stride.\n '
return np.exp(np.arange(np.log(lambda_min), np.log(lambda_max), np.log((1 + (lambda_step / lambda_min))))) | 6,637,842,274,579,725,000 | Create a logarithmic raster with a fixed logarithmic stride, based on a starting wavelength, finishing wavelength,
and a mean wavelength step.
:param lambda_min:
Smallest wavelength in raster.
:param lambda_max:
Largest wavelength in raster.
:param lambda_step:
The approximate pixel size in the raster.
:return:
A numpy array containing a wavelength raster with fixed logarithmic stride. | src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py | logarithmic_raster | dcf21/4most-4gp | python | def logarithmic_raster(lambda_min, lambda_max, lambda_step):
'\n Create a logarithmic raster with a fixed logarithmic stride, based on a starting wavelength, finishing wavelength,\n and a mean wavelength step.\n\n :param lambda_min:\n Smallest wavelength in raster.\n :param lambda_max:\n Largest wavelength in raster.\n :param lambda_step:\n The approximate pixel size in the raster.\n :return:\n A numpy array containing a wavelength raster with fixed logarithmic stride.\n '
return np.exp(np.arange(np.log(lambda_min), np.log(lambda_max), np.log((1 + (lambda_step / lambda_min))))) |
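Because logarithmic_raster above is a single numpy expression, its fixed-stride property is easy to verify directly; the snippet below restates that expression and checks that consecutive pixels differ by a constant logarithmic step (the wavelength values are arbitrary).

import numpy as np

def logarithmic_raster(lambda_min, lambda_max, lambda_step):
    # Same one-line expression as in the row above
    return np.exp(np.arange(np.log(lambda_min), np.log(lambda_max), np.log(1 + lambda_step / lambda_min)))

raster = logarithmic_raster(lambda_min=5000.0, lambda_max=5100.0, lambda_step=0.05)
log_strides = np.diff(np.log(raster))
assert np.allclose(log_strides, np.log(1 + 0.05 / 5000.0))  # constant logarithmic stride
print(len(raster), raster[0], raster[-1])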
def resample_templates(args, logger):
'\n Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in\n turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect\n the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster\n with fixed logarithmic stride.\n\n :param args:\n Object containing arguments supplied by the used, for example the name of the spectrum libraries we use for\n input and output. The required fields are defined by the user interface above.\n :param logger:\n A python logging object.\n :return:\n None.\n '
workspace = (args.workspace if args.workspace else os_path.join(args.our_path, '../../../workspace'))
spectra = SpectrumLibrarySqlite.open_and_search(library_spec=args.templates_in, workspace=workspace, extra_constraints={'continuum_normalised': 0})
(templates_library, templates_library_items, templates_spectra_constraints) = [spectra[i] for i in ('library', 'items', 'constraints')]
library_path = os_path.join(workspace, args.templates_out)
output_library = SpectrumLibrarySqlite(path=library_path, create=True)
etc_wrapper = FourFS(path_to_4fs=os_path.join(args.binary_path, 'OpSys/ETC'), snr_list=[250.0], magnitude=13, snr_per_pixel=True)
for input_spectrum_id in templates_library_items:
logger.info('Working on <{}>'.format(input_spectrum_id['filename']))
input_spectrum_array = templates_library.open(ids=input_spectrum_id['specId'])
template_flux_normalised = input_spectrum_array.extract_item(0)
spectrum_matching_field = ('uid' if ('uid' in template_flux_normalised.metadata) else 'Starname')
object_name = template_flux_normalised.metadata[spectrum_matching_field]
search_criteria = {spectrum_matching_field: object_name, 'continuum_normalised': 1}
continuum_normalised_spectrum_id = templates_library.search(**search_criteria)
assert (len(continuum_normalised_spectrum_id) == 1), 'Could not find continuum-normalised spectrum.'
template_continuum_normalised_arr = templates_library.open(ids=continuum_normalised_spectrum_id[0]['specId'])
template_continuum_normalised = template_continuum_normalised_arr.extract_item(0)
logger.info('Passing template through 4FS')
mock_observed_template = etc_wrapper.process_spectra(spectra_list=((template_flux_normalised, template_continuum_normalised),))
for mode in mock_observed_template:
for index in mock_observed_template[mode]:
for snr in mock_observed_template[mode][index]:
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
for spectrum_type in mock_observed_template[mode][index][snr]:
logger.info('Resampling {} spectrum'.format(mode))
mock_observed = mock_observed_template[mode][index][snr][spectrum_type]
mock_observed.value_errors[np.isnan(mock_observed.value_errors)] = 1000.0
if (not np.all(np.isfinite(mock_observed.values))):
print('Warning: NaN values in template <{}>'.format(template_flux_normalised.metadata['Starname']))
mock_observed.value_errors[np.isnan(mock_observed.values)] = 1000.0
mock_observed.values[np.isnan(mock_observed.values)] = 1.0
resampler = SpectrumResampler(mock_observed)
wavelength_arms = SpectrumProperties(mock_observed.wavelengths).wavelength_arms()
for (arm_count, arm) in enumerate(wavelength_arms['wavelength_arms']):
(arm_raster, mean_pixel_width) = arm
name = '{}_{}'.format(mode, arm_count)
arm_info = {'lambda_min': arm_raster[0], 'lambda_max': arm_raster[(- 1)], 'lambda_step': mean_pixel_width}
arm_raster = logarithmic_raster(lambda_min=arm_info['lambda_min'], lambda_max=arm_info['lambda_max'], lambda_step=arm_info['lambda_step'])
mock_observed_arm = resampler.onto_raster(arm_raster)
output_library.insert(spectra=mock_observed_arm, filenames=input_spectrum_id['filename'], metadata_list={'uid': unique_id, 'template_id': object_name, 'mode': mode, 'arm_name': '{}_{}'.format(mode, arm_count), 'lambda_min': arm_raster[0], 'lambda_max': arm_raster[(- 1)], 'lambda_step': mean_pixel_width}) | -6,798,017,792,568,985,000 | Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in
turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect
the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster
with fixed logarithmic stride.
:param args:
Object containing arguments supplied by the used, for example the name of the spectrum libraries we use for
input and output. The required fields are defined by the user interface above.
:param logger:
A python logging object.
:return:
None. | src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py | resample_templates | dcf21/4most-4gp | python | def resample_templates(args, logger):
'\n Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in\n turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect\n the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster\n with fixed logarithmic stride.\n\n :param args:\n Object containing arguments supplied by the used, for example the name of the spectrum libraries we use for\n input and output. The required fields are defined by the user interface above.\n :param logger:\n A python logging object.\n :return:\n None.\n '
workspace = (args.workspace if args.workspace else os_path.join(args.our_path, '../../../workspace'))
spectra = SpectrumLibrarySqlite.open_and_search(library_spec=args.templates_in, workspace=workspace, extra_constraints={'continuum_normalised': 0})
(templates_library, templates_library_items, templates_spectra_constraints) = [spectra[i] for i in ('library', 'items', 'constraints')]
library_path = os_path.join(workspace, args.templates_out)
output_library = SpectrumLibrarySqlite(path=library_path, create=True)
etc_wrapper = FourFS(path_to_4fs=os_path.join(args.binary_path, 'OpSys/ETC'), snr_list=[250.0], magnitude=13, snr_per_pixel=True)
for input_spectrum_id in templates_library_items:
logger.info('Working on <{}>'.format(input_spectrum_id['filename']))
input_spectrum_array = templates_library.open(ids=input_spectrum_id['specId'])
template_flux_normalised = input_spectrum_array.extract_item(0)
spectrum_matching_field = ('uid' if ('uid' in template_flux_normalised.metadata) else 'Starname')
object_name = template_flux_normalised.metadata[spectrum_matching_field]
search_criteria = {spectrum_matching_field: object_name, 'continuum_normalised': 1}
continuum_normalised_spectrum_id = templates_library.search(**search_criteria)
assert (len(continuum_normalised_spectrum_id) == 1), 'Could not find continuum-normalised spectrum.'
template_continuum_normalised_arr = templates_library.open(ids=continuum_normalised_spectrum_id[0]['specId'])
template_continuum_normalised = template_continuum_normalised_arr.extract_item(0)
logger.info('Passing template through 4FS')
mock_observed_template = etc_wrapper.process_spectra(spectra_list=((template_flux_normalised, template_continuum_normalised),))
for mode in mock_observed_template:
for index in mock_observed_template[mode]:
for snr in mock_observed_template[mode][index]:
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
for spectrum_type in mock_observed_template[mode][index][snr]:
logger.info('Resampling {} spectrum'.format(mode))
mock_observed = mock_observed_template[mode][index][snr][spectrum_type]
mock_observed.value_errors[np.isnan(mock_observed.value_errors)] = 1000.0
if (not np.all(np.isfinite(mock_observed.values))):
print('Warning: NaN values in template <{}>'.format(template_flux_normalised.metadata['Starname']))
mock_observed.value_errors[np.isnan(mock_observed.values)] = 1000.0
mock_observed.values[np.isnan(mock_observed.values)] = 1.0
resampler = SpectrumResampler(mock_observed)
wavelength_arms = SpectrumProperties(mock_observed.wavelengths).wavelength_arms()
for (arm_count, arm) in enumerate(wavelength_arms['wavelength_arms']):
(arm_raster, mean_pixel_width) = arm
name = '{}_{}'.format(mode, arm_count)
arm_info = {'lambda_min': arm_raster[0], 'lambda_max': arm_raster[(- 1)], 'lambda_step': mean_pixel_width}
arm_raster = logarithmic_raster(lambda_min=arm_info['lambda_min'], lambda_max=arm_info['lambda_max'], lambda_step=arm_info['lambda_step'])
mock_observed_arm = resampler.onto_raster(arm_raster)
output_library.insert(spectra=mock_observed_arm, filenames=input_spectrum_id['filename'], metadata_list={'uid': unique_id, 'template_id': object_name, 'mode': mode, 'arm_name': '{}_{}'.format(mode, arm_count), 'lambda_min': arm_raster[0], 'lambda_max': arm_raster[(- 1)], 'lambda_step': mean_pixel_width}) |
def reset(self):
" Reset detectors\n\n Resets statistics and adwin's window.\n\n Returns\n -------\n ADWIN\n self\n\n "
self.__init__(delta=self.delta) | 3,523,812,558,410,450,000 | Reset detectors
Resets statistics and adwin's window.
Returns
-------
ADWIN
self | src/skmultiflow/drift_detection/adwin.py | reset | denisesato/scikit-multiflow | python | def reset(self):
" Reset detectors\n\n Resets statistics and adwin's window.\n\n Returns\n -------\n ADWIN\n self\n\n "
self.__init__(delta=self.delta) |
def get_change(self):
' Get drift\n\n Returns\n -------\n bool\n Whether or not a drift occurred\n\n '
return self.bln_bucket_deleted | 5,362,007,572,281,735,000 | Get drift
Returns
-------
bool
Whether or not a drift occurred | src/skmultiflow/drift_detection/adwin.py | get_change | denisesato/scikit-multiflow | python | def get_change(self):
' Get drift\n\n Returns\n -------\n bool\n Whether or not a drift occurred\n\n '
return self.bln_bucket_deleted |
def __init_buckets(self):
" Initialize the bucket's List and statistics\n\n Set all statistics to 0 and create a new bucket List.\n\n "
self.list_row_bucket = List()
self.last_bucket_row = 0
self._total = 0
self._variance = 0
self._width = 0
self.bucket_number = 0 | 1,576,856,208,439,225,900 | Initialize the bucket's List and statistics
Set all statistics to 0 and create a new bucket List. | src/skmultiflow/drift_detection/adwin.py | __init_buckets | denisesato/scikit-multiflow | python | def __init_buckets(self):
" Initialize the bucket's List and statistics\n\n Set all statistics to 0 and create a new bucket List.\n\n "
self.list_row_bucket = List()
self.last_bucket_row = 0
self._total = 0
self._variance = 0
self._width = 0
self.bucket_number = 0 |
def add_element(self, value):
" Add a new element to the sample window.\n\n Apart from adding the element value to the window, by inserting it in\n the correct bucket, it will also update the relevant statistics, in\n this case the total sum of all values, the window width and the total\n variance.\n\n Parameters\n ----------\n value: int or float (a numeric value)\n\n Notes\n -----\n The value parameter can be any numeric value relevant to the analysis\n of concept change. For the learners in this framework we are using\n either 0's or 1's, that are interpreted as follows:\n 0: Means the learners prediction was wrong\n 1: Means the learners prediction was correct\n\n This function should be used at every new sample analysed.\n\n "
if self.in_concept_change:
self.reset()
self._width += 1
self.__insert_element_bucket(0, value, self.list_row_bucket.first)
incremental_variance = 0
if (self._width > 1):
incremental_variance = ((((self._width - 1) * (value - (self._total / (self._width - 1)))) * (value - (self._total / (self._width - 1)))) / self._width)
self._variance += incremental_variance
self._total += value
self.__compress_buckets() | -116,700,837,709,506,830 | Add a new element to the sample window.
Apart from adding the element value to the window, by inserting it in
the correct bucket, it will also update the relevant statistics, in
this case the total sum of all values, the window width and the total
variance.
Parameters
----------
value: int or float (a numeric value)
Notes
-----
The value parameter can be any numeric value relevant to the analysis
of concept change. For the learners in this framework we are using
either 0's or 1's, that are interpreted as follows:
0: Means the learners prediction was wrong
1: Means the learners prediction was correct
This function should be used at every new sample analysed. | src/skmultiflow/drift_detection/adwin.py | add_element | denisesato/scikit-multiflow | python | def add_element(self, value):
" Add a new element to the sample window.\n\n Apart from adding the element value to the window, by inserting it in\n the correct bucket, it will also update the relevant statistics, in\n this case the total sum of all values, the window width and the total\n variance.\n\n Parameters\n ----------\n value: int or float (a numeric value)\n\n Notes\n -----\n The value parameter can be any numeric value relevant to the analysis\n of concept change. For the learners in this framework we are using\n either 0's or 1's, that are interpreted as follows:\n 0: Means the learners prediction was wrong\n 1: Means the learners prediction was correct\n\n This function should be used at every new sample analysed.\n\n "
if self.in_concept_change:
self.reset()
self._width += 1
self.__insert_element_bucket(0, value, self.list_row_bucket.first)
incremental_variance = 0
if (self._width > 1):
incremental_variance = ((((self._width - 1) * (value - (self._total / (self._width - 1)))) * (value - (self._total / (self._width - 1)))) / self._width)
self._variance += incremental_variance
self._total += value
self.__compress_buckets() |
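The incremental_variance term in add_element above is the textbook online update for the sum of squared deviations: with the old mean m = total/(width-1), adding a value x contributes (width-1)*(x-m)**2/width. A small check of that identity against numpy (numpy assumed, not part of the original row):
import numpy as np

values = [0, 1, 1, 0, 1, 1, 1]
total, width, var_acc = 0.0, 0, 0.0
for x in values:
    width += 1
    if width > 1:
        m = total / (width - 1)          # mean before adding x
        var_acc += (width - 1) * (x - m) ** 2 / width
    total += x

# var_acc tracks the sum of squared deviations, so var_acc/width is the
# population variance of everything seen so far.
assert np.isclose(var_acc / width, np.var(values))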
def delete_element(self):
' Delete an Item from the bucket list.\n\n Deletes the last Item and updates relevant statistics kept by ADWIN.\n\n Returns\n -------\n int\n The bucket size from the updated bucket\n\n '
node = self.list_row_bucket.last
n1 = self.bucket_size(self.last_bucket_row)
self._width -= n1
self._total -= node.get_total(0)
u1 = (node.get_total(0) / n1)
incremental_variance = (node.get_variance(0) + ((((n1 * self._width) * (u1 - (self._total / self._width))) * (u1 - (self._total / self._width))) / (n1 + self._width)))
self._variance -= incremental_variance
node.remove_bucket()
self.bucket_number -= 1
if (node.bucket_size_row == 0):
self.list_row_bucket.remove_from_tail()
self.last_bucket_row -= 1
return n1 | -1,062,732,316,874,703,100 | Delete an Item from the bucket list.
Deletes the last Item and updates relevant statistics kept by ADWIN.
Returns
-------
int
The bucket size from the updated bucket | src/skmultiflow/drift_detection/adwin.py | delete_element | denisesato/scikit-multiflow | python | def delete_element(self):
' Delete an Item from the bucket list.\n\n Deletes the last Item and updates relevant statistics kept by ADWIN.\n\n Returns\n -------\n int\n The bucket size from the updated bucket\n\n '
node = self.list_row_bucket.last
n1 = self.bucket_size(self.last_bucket_row)
self._width -= n1
self._total -= node.get_total(0)
u1 = (node.get_total(0) / n1)
incremental_variance = (node.get_variance(0) + ((((n1 * self._width) * (u1 - (self._total / self._width))) * (u1 - (self._total / self._width))) / (n1 + self._width)))
self._variance -= incremental_variance
node.remove_bucket()
self.bucket_number -= 1
if (node.bucket_size_row == 0):
self.list_row_bucket.remove_from_tail()
self.last_bucket_row -= 1
return n1 |
def detected_change(self):
" Detects concept change in a drifting data stream.\n\n The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from\n Time-Changing Data with Adaptive Windowing'. The general idea is to keep\n statistics from a window of variable size while detecting concept drift.\n\n This function is responsible for analysing different cutting points in\n the sliding window, to verify if there is a significant change in concept.\n\n Returns\n -------\n bln_change : bool\n Whether change was detected or not\n\n Notes\n -----\n If change was detected, one should verify the new window size, by reading\n the width property.\n\n "
bln_change = False
bln_exit = False
bln_bucket_deleted = False
self.mint_time += 1
n0 = 0
if (((self.mint_time % self.mint_clock) == 0) and (self.width > self.mint_min_window_longitude)):
bln_reduce_width = True
while bln_reduce_width:
bln_reduce_width = (not bln_reduce_width)
bln_exit = False
n0 = 0
n1 = self._width
u0 = 0
u1 = self.total
v0 = 0
v1 = self._variance
n2 = 0
u2 = 0
cursor = self.list_row_bucket.last
i = self.last_bucket_row
while ((not bln_exit) and (cursor is not None)):
for k in range(cursor.bucket_size_row):
n2 = self.bucket_size(i)
u2 = cursor.get_total(k)
if (n0 > 0):
v0 += (cursor.get_variance(k) + (((((1.0 * n0) * n2) * ((u0 / n0) - (u2 / n2))) * ((u0 / n0) - (u2 / n2))) / (n0 + n2)))
if (n1 > 0):
v1 -= (cursor.get_variance(k) + (((((1.0 * n1) * n2) * ((u1 / n1) - (u2 / n2))) * ((u1 / n1) - (u2 / n2))) / (n1 + n2)))
n0 += self.bucket_size(i)
n1 -= self.bucket_size(i)
u0 += cursor.get_total(k)
u1 -= cursor.get_total(k)
if ((i == 0) and (k == (cursor.bucket_size_row - 1))):
bln_exit = True
break
abs_value = (1.0 * ((u0 / n0) - (u1 / n1)))
if ((n1 >= self.mint_min_window_length) and (n0 >= self.mint_min_window_length) and self.__bln_cut_expression(n0, n1, u0, u1, v0, v1, abs_value, self.delta)):
bln_bucket_deleted = True
self.detect = self.mint_time
if (self.detect == 0):
self.detect = self.mint_time
elif (self.detect_twice == 0):
self.detect_twice = self.mint_time
bln_reduce_width = True
bln_change = True
if (self.width > 0):
n0 -= self.delete_element()
bln_exit = True
break
cursor = cursor.get_previous()
i -= 1
self.mdbl_width += self.width
if bln_change:
self._n_detections += 1
self.in_concept_change = bln_change
return bln_change | 5,073,971,440,688,084,000 | Detects concept change in a drifting data stream.
The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from
Time-Changing Data with Adaptive Windowing'. The general idea is to keep
statistics from a window of variable size while detecting concept drift.
This function is responsible for analysing different cutting points in
the sliding window, to verify if there is a significant change in concept.
Returns
-------
bln_change : bool
Whether change was detected or not
Notes
-----
If change was detected, one should verify the new window size, by reading
the width property. | src/skmultiflow/drift_detection/adwin.py | detected_change | denisesato/scikit-multiflow | python | def detected_change(self):
" Detects concept change in a drifting data stream.\n\n The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from\n Time-Changing Data with Adaptive Windowing'. The general idea is to keep\n statistics from a window of variable size while detecting concept drift.\n\n This function is responsible for analysing different cutting points in\n the sliding window, to verify if there is a significant change in concept.\n\n Returns\n -------\n bln_change : bool\n Whether change was detected or not\n\n Notes\n -----\n If change was detected, one should verify the new window size, by reading\n the width property.\n\n "
bln_change = False
bln_exit = False
bln_bucket_deleted = False
self.mint_time += 1
n0 = 0
if (((self.mint_time % self.mint_clock) == 0) and (self.width > self.mint_min_window_longitude)):
bln_reduce_width = True
while bln_reduce_width:
bln_reduce_width = (not bln_reduce_width)
bln_exit = False
n0 = 0
n1 = self._width
u0 = 0
u1 = self.total
v0 = 0
v1 = self._variance
n2 = 0
u2 = 0
cursor = self.list_row_bucket.last
i = self.last_bucket_row
while ((not bln_exit) and (cursor is not None)):
for k in range(cursor.bucket_size_row):
n2 = self.bucket_size(i)
u2 = cursor.get_total(k)
if (n0 > 0):
v0 += (cursor.get_variance(k) + (((((1.0 * n0) * n2) * ((u0 / n0) - (u2 / n2))) * ((u0 / n0) - (u2 / n2))) / (n0 + n2)))
if (n1 > 0):
v1 -= (cursor.get_variance(k) + (((((1.0 * n1) * n2) * ((u1 / n1) - (u2 / n2))) * ((u1 / n1) - (u2 / n2))) / (n1 + n2)))
n0 += self.bucket_size(i)
n1 -= self.bucket_size(i)
u0 += cursor.get_total(k)
u1 -= cursor.get_total(k)
if ((i == 0) and (k == (cursor.bucket_size_row - 1))):
bln_exit = True
break
abs_value = (1.0 * ((u0 / n0) - (u1 / n1)))
if ((n1 >= self.mint_min_window_length) and (n0 >= self.mint_min_window_length) and self.__bln_cut_expression(n0, n1, u0, u1, v0, v1, abs_value, self.delta)):
bln_bucket_deleted = True
self.detect = self.mint_time
if (self.detect == 0):
self.detect = self.mint_time
elif (self.detect_twice == 0):
self.detect_twice = self.mint_time
bln_reduce_width = True
bln_change = True
if (self.width > 0):
n0 -= self.delete_element()
bln_exit = True
break
cursor = cursor.get_previous()
i -= 1
self.mdbl_width += self.width
if bln_change:
self._n_detections += 1
self.in_concept_change = bln_change
return bln_change |
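add_element and detected_change from the rows above are the two calls a caller actually needs. A minimal usage sketch, assuming a scikit-multiflow installation that exposes the class at skmultiflow.drift_detection.ADWIN (consistent with the path column); the synthetic stream is only illustrative:
import numpy as np
from skmultiflow.drift_detection import ADWIN

adwin = ADWIN(delta=0.002)
# Binary stream whose success rate jumps from 0.2 to 0.8 halfway through.
stream = np.concatenate([np.random.binomial(1, 0.2, 1000),
                         np.random.binomial(1, 0.8, 1000)])
for i, x in enumerate(stream):
    adwin.add_element(x)
    if adwin.detected_change():
        print('Change detected at index', i)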
def reset(self):
" Reset the algorithm's statistics and window\n\n Returns\n -------\n ADWIN\n self\n\n "
self.bucket_size_row = 0
for i in range((ADWIN.MAX_BUCKETS + 1)):
self.__clear_buckets(i)
return self | 1,662,833,394,584,415,000 | Reset the algorithm's statistics and window
Returns
-------
ADWIN
self | src/skmultiflow/drift_detection/adwin.py | reset | denisesato/scikit-multiflow | python | def reset(self):
" Reset the algorithm's statistics and window\n\n Returns\n -------\n ADWIN\n self\n\n "
self.bucket_size_row = 0
for i in range((ADWIN.MAX_BUCKETS + 1)):
self.__clear_buckets(i)
return self |
def set_dof(self, dof_value_map):
'\n dof_value_map: A dict that maps robot attribute name to a list of corresponding values\n '
if (not isinstance(self._geom, Robot)):
return
dof_val = self.env_body.GetActiveDOFValues()
for (k, v) in dof_value_map.items():
if ((k not in self._geom.dof_map) or np.any(np.isnan(v))):
continue
inds = self._geom.dof_map[k]
try:
dof_val[inds] = v
except IndexError:
print(('\n\n\nBad index in set dof:', inds, k, v, self._geom, '\n\n\n'))
self.env_body.SetActiveDOFValues(dof_val) | -6,041,207,392,714,247,000 | dof_value_map: A dict that maps robot attribute name to a list of corresponding values | opentamp/src/core/util_classes/no_openrave_body.py | set_dof | Algorithmic-Alignment-Lab/OpenTAMP | python | def set_dof(self, dof_value_map):
'\n \n '
if (not isinstance(self._geom, Robot)):
return
dof_val = self.env_body.GetActiveDOFValues()
for (k, v) in dof_value_map.items():
if ((k not in self._geom.dof_map) or np.any(np.isnan(v))):
continue
inds = self._geom.dof_map[k]
try:
dof_val[inds] = v
except IndexError:
print(('\n\n\nBad index in set dof:', inds, k, v, self._geom, '\n\n\n'))
self.env_body.SetActiveDOFValues(dof_val) |
def _set_active_dof_inds(self, inds=None):
'\n Set active dof index to the one we are interested\n This function is implemented to simplify jacobian calculation in the CollisionPredicate\n inds: Optional list of index specifying dof index we are interested in\n '
robot = self.env_body
if (inds == None):
dof_inds = np.ndarray(0, dtype=np.int)
if (robot.GetJoint('torso_lift_joint') != None):
dof_inds = np.r_[(dof_inds, robot.GetJoint('torso_lift_joint').GetDOFIndex())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('leftarm').GetArmIndices())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('leftarm').GetGripperIndices())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('rightarm').GetArmIndices())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('rightarm').GetGripperIndices())]
robot.SetActiveDOFs(dof_inds, ((DOFAffine.X + DOFAffine.Y) + DOFAffine.RotationAxis), [0, 0, 1])
else:
robot.SetActiveDOFs(inds) | -5,458,568,516,933,556,000 | Set active dof index to the one we are interested
This function is implemented to simplify jacobian calculation in the CollisionPredicate
inds: Optional list of index specifying dof index we are interested in | opentamp/src/core/util_classes/no_openrave_body.py | _set_active_dof_inds | Algorithmic-Alignment-Lab/OpenTAMP | python | def _set_active_dof_inds(self, inds=None):
'\n Set active dof index to the one we are interested\n This function is implemented to simplify jacobian calculation in the CollisionPredicate\n inds: Optional list of index specifying dof index we are interested in\n '
robot = self.env_body
if (inds == None):
dof_inds = np.ndarray(0, dtype=np.int)
if (robot.GetJoint('torso_lift_joint') != None):
dof_inds = np.r_[(dof_inds, robot.GetJoint('torso_lift_joint').GetDOFIndex())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('leftarm').GetArmIndices())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('leftarm').GetGripperIndices())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('rightarm').GetArmIndices())]
dof_inds = np.r_[(dof_inds, robot.GetManipulator('rightarm').GetGripperIndices())]
robot.SetActiveDOFs(dof_inds, ((DOFAffine.X + DOFAffine.Y) + DOFAffine.RotationAxis), [0, 0, 1])
else:
robot.SetActiveDOFs(inds) |
def __init__(self, rate_tables=None):
'RateTableResponse - a model defined in Swagger'
self._rate_tables = None
self.discriminator = None
if (rate_tables is not None):
self.rate_tables = rate_tables | 9,004,336,416,813,501,000 | RateTableResponse - a model defined in Swagger | src/ebay_rest/api/sell_account/models/rate_table_response.py | __init__ | craiga/ebay_rest | python | def __init__(self, rate_tables=None):
self._rate_tables = None
self.discriminator = None
if (rate_tables is not None):
self.rate_tables = rate_tables |
@property
def rate_tables(self):
'Gets the rate_tables of this RateTableResponse. # noqa: E501\n\n A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501\n\n :return: The rate_tables of this RateTableResponse. # noqa: E501\n :rtype: list[RateTable]\n '
return self._rate_tables | 5,818,470,464,169,381,000 | Gets the rate_tables of this RateTableResponse. # noqa: E501
A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501
:return: The rate_tables of this RateTableResponse. # noqa: E501
:rtype: list[RateTable] | src/ebay_rest/api/sell_account/models/rate_table_response.py | rate_tables | craiga/ebay_rest | python | @property
def rate_tables(self):
'Gets the rate_tables of this RateTableResponse. # noqa: E501\n\n A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501\n\n :return: The rate_tables of this RateTableResponse. # noqa: E501\n :rtype: list[RateTable]\n '
return self._rate_tables |
@rate_tables.setter
def rate_tables(self, rate_tables):
'Sets the rate_tables of this RateTableResponse.\n\n A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501\n\n :param rate_tables: The rate_tables of this RateTableResponse. # noqa: E501\n :type: list[RateTable]\n '
self._rate_tables = rate_tables | 205,337,510,896,969,600 | Sets the rate_tables of this RateTableResponse.
A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501
:param rate_tables: The rate_tables of this RateTableResponse. # noqa: E501
:type: list[RateTable] | src/ebay_rest/api/sell_account/models/rate_table_response.py | rate_tables | craiga/ebay_rest | python | @rate_tables.setter
def rate_tables(self, rate_tables):
'Sets the rate_tables of this RateTableResponse.\n\n A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501\n\n :param rate_tables: The rate_tables of this RateTableResponse. # noqa: E501\n :type: list[RateTable]\n '
self._rate_tables = rate_tables |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(RateTableResponse, dict):
for (key, value) in self.items():
result[key] = value
return result | -8,185,449,808,055,180,000 | Returns the model properties as a dict | src/ebay_rest/api/sell_account/models/rate_table_response.py | to_dict | craiga/ebay_rest | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(RateTableResponse, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | src/ebay_rest/api/sell_account/models/rate_table_response.py | to_str | craiga/ebay_rest | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | src/ebay_rest/api/sell_account/models/rate_table_response.py | __repr__ | craiga/ebay_rest | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, RateTableResponse)):
return False
return (self.__dict__ == other.__dict__) | -6,882,749,671,341,800,000 | Returns true if both objects are equal | src/ebay_rest/api/sell_account/models/rate_table_response.py | __eq__ | craiga/ebay_rest | python | def __eq__(self, other):
if (not isinstance(other, RateTableResponse)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | src/ebay_rest/api/sell_account/models/rate_table_response.py | __ne__ | craiga/ebay_rest | python | def __ne__(self, other):
return (not (self == other)) |
def _check_before_run(self):
'Check if all files are available before going deeper'
if (not osp.exists(self.dataset_dir)):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if (not osp.exists(self.train_dir)):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if (not osp.exists(self.query_dir)):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if (not osp.exists(self.gallery_dir)):
raise RuntimeError("'{}' is not available".format(self.gallery_dir)) | -3,003,780,492,068,818,000 | Check if all files are available before going deeper | torchreid/datasets/dukemtmcvidreid.py | _check_before_run | ArronHZG/ABD-Net | python | def _check_before_run(self):
if (not osp.exists(self.dataset_dir)):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if (not osp.exists(self.train_dir)):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if (not osp.exists(self.query_dir)):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if (not osp.exists(self.gallery_dir)):
raise RuntimeError("'{}' is not available".format(self.gallery_dir)) |
def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False):
'\n Args:\n t_prof (TrainingProfile)\n chief_handle (class instance or ray ActorHandle)\n evaluator_name (str): Name of the evaluator\n '
super().__init__(t_prof=t_prof)
self._eval_env_bldr = eval_env_bldr
self._chief_handle = chief_handle
self._is_multi_stack = (len(self._t_prof.eval_stack_sizes) > 1)
self._log_conf_interval = log_conf_interval
self._evaluator_name = evaluator_name
(self._exp_name_total, self._exp_names_conf) = self._create_experiments(self_name=evaluator_name)
if self._is_multi_stack:
self._exp_name_multi_stack = {eval_mode: self._ray.get(self._ray.remote(self._chief_handle.create_experiment, ((((((self._t_prof.name + ' ') + eval_mode) + 'Multi_Stack') + ': ') + evaluator_name) + ' Averaged Total'))) for eval_mode in self._t_prof.eval_modes_of_algo}
if self._log_conf_interval:
self._exp_names_multi_stack_conf = {eval_mode: self._ray.get([self._ray.remote(self._chief_handle.create_experiment, ((((((self._t_prof.name + ' ') + eval_mode) + ': ') + evaluator_name) + ' Conf_') + bound_end)) for bound_end in ['lower95', 'upper95']]) for eval_mode in self._t_prof.eval_modes_of_algo} | 6,732,575,788,846,942,000 | Args:
t_prof (TrainingProfile)
chief_handle (class instance or ray ActorHandle)
evaluator_name (str): Name of the evaluator | PokerRL/eval/_/EvaluatorMasterBase.py | __init__ | EricSteinberger/DREAM | python | def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False):
'\n Args:\n t_prof (TrainingProfile)\n chief_handle (class instance or ray ActorHandle)\n evaluator_name (str): Name of the evaluator\n '
super().__init__(t_prof=t_prof)
self._eval_env_bldr = eval_env_bldr
self._chief_handle = chief_handle
self._is_multi_stack = (len(self._t_prof.eval_stack_sizes) > 1)
self._log_conf_interval = log_conf_interval
self._evaluator_name = evaluator_name
(self._exp_name_total, self._exp_names_conf) = self._create_experiments(self_name=evaluator_name)
if self._is_multi_stack:
self._exp_name_multi_stack = {eval_mode: self._ray.get(self._ray.remote(self._chief_handle.create_experiment, ((((((self._t_prof.name + ' ') + eval_mode) + 'Multi_Stack') + ': ') + evaluator_name) + ' Averaged Total'))) for eval_mode in self._t_prof.eval_modes_of_algo}
if self._log_conf_interval:
self._exp_names_multi_stack_conf = {eval_mode: self._ray.get([self._ray.remote(self._chief_handle.create_experiment, ((((((self._t_prof.name + ' ') + eval_mode) + ': ') + evaluator_name) + ' Conf_') + bound_end)) for bound_end in ['lower95', 'upper95']]) for eval_mode in self._t_prof.eval_modes_of_algo} |
@property
def is_multi_stack(self):
'\n Whether the agent is evaluated in games that start with different stack sizes each time.\n '
return self._is_multi_stack | 719,053,695,574,214,300 | Whether the agent is evaluated in games that start with different stack sizes each time. | PokerRL/eval/_/EvaluatorMasterBase.py | is_multi_stack | EricSteinberger/DREAM | python | @property
def is_multi_stack(self):
'\n \n '
return self._is_multi_stack |
def evaluate(self, iter_nr):
' Evaluate an agent and send the results as logs to the Chief. '
raise NotImplementedError | 2,693,691,242,665,896,400 | Evaluate an agent and send the results as logs to the Chief. | PokerRL/eval/_/EvaluatorMasterBase.py | evaluate | EricSteinberger/DREAM | python | def evaluate(self, iter_nr):
' '
raise NotImplementedError |
def update_weights(self):
' Update the local weights on the master, for instance by calling .pull_current_strat_from_chief() '
raise NotImplementedError | -622,503,933,067,415,300 | Update the local weights on the master, for instance by calling .pull_current_strat_from_chief() | PokerRL/eval/_/EvaluatorMasterBase.py | update_weights | EricSteinberger/DREAM | python | def update_weights(self):
' '
raise NotImplementedError |
def pull_current_strat_from_chief(self):
'\n Pulls and Returns weights or any other changing algorithm info of any format from the Chief.\n '
return self._ray.get(self._ray.remote(self._chief_handle.pull_current_eval_strategy, self._evaluator_name)) | 5,797,578,648,283,884,000 | Pulls and Returns weights or any other changing algorithm info of any format from the Chief. | PokerRL/eval/_/EvaluatorMasterBase.py | pull_current_strat_from_chief | EricSteinberger/DREAM | python | def pull_current_strat_from_chief(self):
'\n \n '
return self._ray.get(self._ray.remote(self._chief_handle.pull_current_eval_strategy, self._evaluator_name)) |
def _create_experiments(self, self_name):
'\n Registers a new experiment either for each player and their average or just for their average.\n '
if self._log_conf_interval:
exp_names_conf = {eval_mode: [self._ray.get([self._ray.remote(self._chief_handle.create_experiment, ((((((((self._t_prof.name + ' ') + eval_mode) + '_stack_') + str(stack_size[0])) + ': ') + self_name) + ' Conf_') + bound_end)) for bound_end in ['lower95', 'upper95']]) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo}
else:
exp_names_conf = None
exp_name_total = {eval_mode: [self._ray.get(self._ray.remote(self._chief_handle.create_experiment, (((((((self._t_prof.name + ' ') + eval_mode) + '_stack_') + str(stack_size[0])) + ': ') + self_name) + ' Total'))) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo}
return (exp_name_total, exp_names_conf) | -4,117,428,563,300,774,000 | Registers a new experiment either for each player and their average or just for their average. | PokerRL/eval/_/EvaluatorMasterBase.py | _create_experiments | EricSteinberger/DREAM | python | def _create_experiments(self, self_name):
'\n \n '
if self._log_conf_interval:
exp_names_conf = {eval_mode: [self._ray.get([self._ray.remote(self._chief_handle.create_experiment, ((((((((self._t_prof.name + ' ') + eval_mode) + '_stack_') + str(stack_size[0])) + ': ') + self_name) + ' Conf_') + bound_end)) for bound_end in ['lower95', 'upper95']]) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo}
else:
exp_names_conf = None
exp_name_total = {eval_mode: [self._ray.get(self._ray.remote(self._chief_handle.create_experiment, (((((((self._t_prof.name + ' ') + eval_mode) + '_stack_') + str(stack_size[0])) + ': ') + self_name) + ' Total'))) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo}
return (exp_name_total, exp_names_conf) |
def _log_results(self, agent_mode, stack_size_idx, iter_nr, score, upper_conf95=None, lower_conf95=None):
'\n Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.\n\n Args:\n agent_mode: Evaluation mode of the agent whose performance is logged\n stack_size_idx: If evaluating multiple starting stack sizes, this is an index describing which one\n this data is from.\n iter_nr: Algorithm Iteration of this data\n score: Score in this evaluation (e.g. exploitability)\n '
graph_name = ('Evaluation/' + self._eval_env_bldr.env_cls.WIN_METRIC)
self._ray.remote(self._chief_handle.add_scalar, self._exp_name_total[agent_mode][stack_size_idx], graph_name, iter_nr, score)
if self._log_conf_interval:
assert (upper_conf95 is not None)
assert (lower_conf95 is not None)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_conf[agent_mode][stack_size_idx][0], graph_name, iter_nr, lower_conf95)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_conf[agent_mode][stack_size_idx][1], graph_name, iter_nr, upper_conf95) | 5,679,980,090,370,800,000 | Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.
Args:
agent_mode: Evaluation mode of the agent whose performance is logged
stack_size_idx: If evaluating multiple starting stack sizes, this is an index describing which one
this data is from.
iter_nr: Algorithm Iteration of this data
score: Score in this evaluation (e.g. exploitability) | PokerRL/eval/_/EvaluatorMasterBase.py | _log_results | EricSteinberger/DREAM | python | def _log_results(self, agent_mode, stack_size_idx, iter_nr, score, upper_conf95=None, lower_conf95=None):
'\n Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.\n\n Args:\n agent_mode: Evaluation mode of the agent whose performance is logged\n stack_size_idx: If evaluating multiple starting stack sizes, this is an index describing which one\n this data is from.\n iter_nr: Algorithm Iteration of this data\n score: Score in this evaluation (e.g. exploitability)\n '
graph_name = ('Evaluation/' + self._eval_env_bldr.env_cls.WIN_METRIC)
self._ray.remote(self._chief_handle.add_scalar, self._exp_name_total[agent_mode][stack_size_idx], graph_name, iter_nr, score)
if self._log_conf_interval:
assert (upper_conf95 is not None)
assert (lower_conf95 is not None)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_conf[agent_mode][stack_size_idx][0], graph_name, iter_nr, lower_conf95)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_conf[agent_mode][stack_size_idx][1], graph_name, iter_nr, upper_conf95) |
def _log_multi_stack(self, agent_mode, iter_nr, score_total, upper_conf95=None, lower_conf95=None):
'\n Additional logging for multistack evaluations\n '
graph_name = ('Evaluation/' + self._eval_env_bldr.env_cls.WIN_METRIC)
self._ray.remote(self._chief_handle.add_scalar, self._exp_name_multi_stack[agent_mode], graph_name, iter_nr, score_total)
if self._log_conf_interval:
assert (upper_conf95 is not None)
assert (lower_conf95 is not None)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_multi_stack_conf[agent_mode][0], graph_name, iter_nr, lower_conf95)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_multi_stack_conf[agent_mode][1], graph_name, iter_nr, upper_conf95) | 5,770,338,791,580,326,000 | Additional logging for multistack evaluations | PokerRL/eval/_/EvaluatorMasterBase.py | _log_multi_stack | EricSteinberger/DREAM | python | def _log_multi_stack(self, agent_mode, iter_nr, score_total, upper_conf95=None, lower_conf95=None):
'\n \n '
graph_name = ('Evaluation/' + self._eval_env_bldr.env_cls.WIN_METRIC)
self._ray.remote(self._chief_handle.add_scalar, self._exp_name_multi_stack[agent_mode], graph_name, iter_nr, score_total)
if self._log_conf_interval:
assert (upper_conf95 is not None)
assert (lower_conf95 is not None)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_multi_stack_conf[agent_mode][0], graph_name, iter_nr, lower_conf95)
self._ray.remote(self._chief_handle.add_scalar, self._exp_names_multi_stack_conf[agent_mode][1], graph_name, iter_nr, upper_conf95) |
def makeSemVer(version):
'Turn simple float number (0.1) into semver-compatible number\n for comparison by adding .0(s): (0.1.0)'
version = str(version)
if (version.count('.') < 2):
version = '.'.join(map(str, list(map(int, version.split('.')))))
version = (version + ((2 - version.count('.')) * '.0'))
return version | -6,835,943,435,101,553,000 | Turn simple float number (0.1) into semver-compatible number
for comparison by adding .0(s): (0.1.0) | Lib/typeworld/api/__init__.py | makeSemVer | typeWorld/api | python | def makeSemVer(version):
'Turn simple float number (0.1) into semver-compatible number\n for comparison by adding .0(s): (0.1.0)'
version = str(version)
if (version.count('.') < 2):
version = '.'.join(map(str, list(map(int, version.split('.')))))
version = (version + ((2 - version.count('.')) * '.0'))
return version |
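makeSemVer only pads missing components, so a float or a short string comes back as a three-part version while a full triple is returned unchanged. A few examples that follow directly from the code above (assuming the function is importable as-is):
assert makeSemVer(0.1) == '0.1.0'      # float -> '0.1' -> padded
assert makeSemVer('2.1') == '2.1.0'    # one missing component added
assert makeSemVer('1.2.3') == '1.2.3'  # already semver-shaped, untouched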
def getTextAndLocale(self, locale=['en']):
'Like getText(), but additionally returns the language of whatever\n text was found first.'
if (type(locale) == str):
if self.get(locale):
return (self.get(locale), locale)
elif (type(locale) in (list, tuple)):
for key in locale:
if self.get(key):
return (self.get(key), key)
if self.get('en'):
return (self.get('en'), 'en')
for key in self._possible_keys:
if self.get(key):
return (self.get(key), key)
return (None, None) | -655,386,166,473,927,700 | Like getText(), but additionally returns the language of whatever
text was found first. | Lib/typeworld/api/__init__.py | getTextAndLocale | typeWorld/api | python | def getTextAndLocale(self, locale=['en']):
'Like getText(), but additionally returns the language of whatever\n text was found first.'
if (type(locale) == str):
if self.get(locale):
return (self.get(locale), locale)
elif (type(locale) in (list, tuple)):
for key in locale:
if self.get(key):
return (self.get(key), key)
if self.get('en'):
return (self.get('en'), 'en')
for key in self._possible_keys:
if self.get(key):
return (self.get(key), key)
return (None, None) |
def getText(self, locale=['en']):
'Returns the text in the first language found from the specified\n list of languages. If that language can’t be found, we’ll try English\n as a standard. If that can’t be found either, return the first language\n you can find.'
(text, locale) = self.getTextAndLocale(locale)
return text | 5,000,335,307,026,303,000 | Returns the text in the first language found from the specified
list of languages. If that language can’t be found, we’ll try English
as a standard. If that can’t be found either, return the first language
you can find. | Lib/typeworld/api/__init__.py | getText | typeWorld/api | python | def getText(self, locale=['en']):
'Returns the text in the first language found from the specified\n list of languages. If that language can’t be found, we’ll try English\n as a standard. If that can’t be found either, return the first language\n you can find.'
(text, locale) = self.getTextAndLocale(locale)
return text |
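getTextAndLocale and getText implement a three-stage fallback: the requested locales in order, then 'en', then the first populated key at all. A stand-alone illustration of that order using a plain dict stand-in (not the real typeworld object, whose constructor is not shown in this row):
def get_text(entries, locales=('en',), possible_keys=('en', 'de', 'fr')):
    for key in locales:                 # 1. requested locales, in order
        if entries.get(key):
            return entries[key], key
    if entries.get('en'):               # 2. English fallback
        return entries['en'], 'en'
    for key in possible_keys:           # 3. first populated key of any kind
        if entries.get(key):
            return entries[key], key
    return None, None

print(get_text({'de': 'Hallo'}, locales=['fr', 'de']))  # ('Hallo', 'de')
print(get_text({'de': 'Hallo'}, locales=['fr']))        # ('Hallo', 'de') via step 3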